v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * Can be called with only the mmap_lock for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				/*
				 * Leave the write bit to be handled by the
				 * page fault handler, so that things like
				 * COW can be handled properly.
				 */
				ptent = pte_clear_uffd_wp(ptent);
			}

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				entry = make_readable_migration_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
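/*
 * Editorial note: pte_mk_savedwrite() above matters mainly on powerpc,
 * where a NUMA-hinting PTE must lose its access permissions while
 * remembering that it used to be writable, so the hinting fault can
 * restore write access without a spurious COW. On architectures without
 * a saved-write bit the generic fallback is simply pte_mkwrite().
 */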
/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}
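/*
 * Editorial note: pmd_read_atomic() in the helper above guards against
 * torn reads of the 8-byte pmd on 32-bit PAE configurations, and the
 * barrier() keeps the compiler from re-fetching *pmd between the checks;
 * see the long comment above pmd_none_or_trans_huge_or_clear_bad() in
 * include/linux/pgtable.h for the full reasoning.
 */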
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check, leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
							      newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
					      cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}
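/*
 * Editorial sketch: every walker above relies on the same *_addr_end()
 * idiom: clamp the next level-sized boundary to the overall end, so one
 * loop iteration never covers more than a single table entry's span. A
 * self-contained userspace analogue (illustrative only, with a made-up
 * 2MB span standing in for PMD_SIZE):
 */
#if 0
#include <stdio.h>

#define SPAN (2UL << 20)	/* pretend one table entry maps 2MB */

static unsigned long span_addr_end(unsigned long addr, unsigned long end)
{
	/* next SPAN-aligned boundary; the "- 1" guards against wrap-around */
	unsigned long boundary = (addr + SPAN) & ~(SPAN - 1);

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x1ff000, end = 0x600000, next;

	do {	/* same shape as the do/while loops above */
		next = span_addr_end(addr, end);
		printf("chunk: %#lx-%#lx\n", addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif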
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						cp_flags);

	return pages;
}

static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};
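/*
 * Editorial note: walk_page_range() normally skips VM_PFNMAP areas, so a
 * test_walk hook that always returns 0 is supplied to force the walk.
 * pfn_modify_allowed() is the architecture hook behind this check; on
 * x86 it implements the L1TF mitigation that refuses PROT_NONE PTEs
 * whose physical frame could otherwise be leaked speculatively.
 */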
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			goto out;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				goto out;
		}

		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	mmap_write_unlock(current->mm);
	return error;
}
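/*
 * Editorial note: the loop above applies the new protection one VMA at a
 * time and stops at the first failure, so an error return can leave the
 * earlier part of the range already changed. POSIX explicitly allows
 * this partial-success behaviour for mprotect().
 */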
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
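/*
 * Editorial sketch: the syscall above backs the mprotect(2) library
 * wrapper. A minimal, self-contained userspace example (illustrative
 * only, not part of this file) that toggles write permission on one
 * anonymous page:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");
	if (mprotect(p, page, PROT_READ))	/* drop write permission */
		perror("mprotect");
	/* writing through p here would raise SIGSEGV */
	if (mprotect(p, page, PROT_READ | PROT_WRITE))
		perror("mprotect");
	p[0] = 'H';				/* writable again */
	printf("%s\n", p);
	return munmap(p, page);
}
#endif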
#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */
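/*
 * Editorial sketch: a userspace example of the pkey syscalls above, using
 * the glibc (>= 2.27) wrappers from <sys/mman.h>. It assumes x86 hardware
 * with memory protection keys; pkey_set() switches this thread's access
 * rights through the PKRU register without entering the kernel.
 * Illustrative only, not part of this file:
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, 0);	/* no flags, no initial restrictions */

	if (p == MAP_FAILED || pkey < 0)
		return 1;
	/* like mprotect(), but additionally tags the mapping with the pkey */
	if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey))
		return 1;
	pkey_set(pkey, PKEY_DISABLE_WRITE);	/* this thread: no writes */
	/* p[0] = 'x' here would fault (SIGSEGV with SEGV_PKUERR) */
	pkey_set(pkey, 0);			/* restore full access */
	p[0] = 'x';
	pkey_free(pkey);
	return munmap(p, page);
}
#endif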