mm/mprotect.c, kernel v3.1:
  1/*
  2 *  mm/mprotect.c
  3 *
  4 *  (C) Copyright 1994 Linus Torvalds
  5 *  (C) Copyright 2002 Christoph Hellwig
  6 *
  7 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  8 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/hugetlb.h>
 13#include <linux/shm.h>
 14#include <linux/mman.h>
 15#include <linux/fs.h>
 16#include <linux/highmem.h>
 17#include <linux/security.h>
 18#include <linux/mempolicy.h>
 19#include <linux/personality.h>
 20#include <linux/syscalls.h>
 21#include <linux/swap.h>
 22#include <linux/swapops.h>
 23#include <linux/mmu_notifier.h>
 24#include <linux/migrate.h>
 25#include <linux/perf_event.h>
 26#include <asm/uaccess.h>
 27#include <asm/pgtable.h>
 28#include <asm/cacheflush.h>
 29#include <asm/tlbflush.h>
 30
 31#ifndef pgprot_modify
 32static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 33{
 34	return newprot;
 35}
 36#endif
 37
 38static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		int dirty_accountable)
 41{
 42	pte_t *pte, oldpte;
 43	spinlock_t *ptl;
 44
 45	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 46	arch_enter_lazy_mmu_mode();
 47	do {
 48		oldpte = *pte;
 49		if (pte_present(oldpte)) {
 50			pte_t ptent;
 51
 52			ptent = ptep_modify_prot_start(mm, addr, pte);
 53			ptent = pte_modify(ptent, newprot);
 54
 55			/*
 56			 * Avoid taking write faults for pages we know to be
 57			 * dirty.
 58			 */
 59			if (dirty_accountable && pte_dirty(ptent))
 60				ptent = pte_mkwrite(ptent);
 61
 62			ptep_modify_prot_commit(mm, addr, pte, ptent);
 63		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
 64			swp_entry_t entry = pte_to_swp_entry(oldpte);
 65
 66			if (is_write_migration_entry(entry)) {
 67				/*
 68				 * A protection check is difficult so
 69				 * just be safe and disable write
 70				 */
 71				make_migration_entry_read(&entry);
 72				set_pte_at(mm, addr, pte,
 73					swp_entry_to_pte(entry));
 74			}
 75		}
 76	} while (pte++, addr += PAGE_SIZE, addr != end);
 77	arch_leave_lazy_mmu_mode();
 78	pte_unmap_unlock(pte - 1, ptl);
 79}
 80
 81static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 82		unsigned long addr, unsigned long end, pgprot_t newprot,
 83		int dirty_accountable)
 84{
 85	pmd_t *pmd;
 86	unsigned long next;
 87
 88	pmd = pmd_offset(pud, addr);
 89	do {
 90		next = pmd_addr_end(addr, end);
 91		if (pmd_trans_huge(*pmd)) {
 92			if (next - addr != HPAGE_PMD_SIZE)
 93				split_huge_page_pmd(vma->vm_mm, pmd);
 94			else if (change_huge_pmd(vma, pmd, addr, newprot))
 95				continue;
 96			/* fall through */
 97		}
 98		if (pmd_none_or_clear_bad(pmd))
 99			continue;
100		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
101				 dirty_accountable);
102	} while (pmd++, addr = next, addr != end);
103}
104
105static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
106		unsigned long addr, unsigned long end, pgprot_t newprot,
107		int dirty_accountable)
108{
109	pud_t *pud;
110	unsigned long next;
111
112	pud = pud_offset(pgd, addr);
113	do {
114		next = pud_addr_end(addr, end);
115		if (pud_none_or_clear_bad(pud))
116			continue;
117		change_pmd_range(vma, pud, addr, next, newprot,
118				 dirty_accountable);
119	} while (pud++, addr = next, addr != end);
120}
121
122static void change_protection(struct vm_area_struct *vma,
123		unsigned long addr, unsigned long end, pgprot_t newprot,
124		int dirty_accountable)
125{
126	struct mm_struct *mm = vma->vm_mm;
127	pgd_t *pgd;
128	unsigned long next;
129	unsigned long start = addr;
130
131	BUG_ON(addr >= end);
132	pgd = pgd_offset(mm, addr);
133	flush_cache_range(vma, addr, end);
134	do {
135		next = pgd_addr_end(addr, end);
136		if (pgd_none_or_clear_bad(pgd))
137			continue;
138		change_pud_range(vma, pgd, addr, next, newprot,
139				 dirty_accountable);
140	} while (pgd++, addr = next, addr != end);
141	flush_tlb_range(vma, start, end);
142}
143
144int
145mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
146	unsigned long start, unsigned long end, unsigned long newflags)
147{
148	struct mm_struct *mm = vma->vm_mm;
149	unsigned long oldflags = vma->vm_flags;
150	long nrpages = (end - start) >> PAGE_SHIFT;
151	unsigned long charged = 0;
152	pgoff_t pgoff;
153	int error;
154	int dirty_accountable = 0;
155
156	if (newflags == oldflags) {
157		*pprev = vma;
158		return 0;
159	}
160
161	/*
162	 * If we make a private mapping writable we increase our commit;
163	 * but (without finer accounting) cannot reduce our commit if we
164	 * make it unwritable again. hugetlb mapping were accounted for
165	 * even if read-only so there is no need to account for them here
166	 */
167	if (newflags & VM_WRITE) {
168		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
169						VM_SHARED|VM_NORESERVE))) {
170			charged = nrpages;
171			if (security_vm_enough_memory(charged))
172				return -ENOMEM;
173			newflags |= VM_ACCOUNT;
174		}
175	}
176
177	/*
178	 * First try to merge with previous and/or next vma.
179	 */
180	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
181	*pprev = vma_merge(mm, *pprev, start, end, newflags,
182			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
183	if (*pprev) {
184		vma = *pprev;
185		goto success;
186	}
187
188	*pprev = vma;
189
190	if (start != vma->vm_start) {
191		error = split_vma(mm, vma, start, 1);
192		if (error)
193			goto fail;
194	}
195
196	if (end != vma->vm_end) {
197		error = split_vma(mm, vma, end, 0);
198		if (error)
199			goto fail;
200	}
201
202success:
203	/*
204	 * vm_flags and vm_page_prot are protected by the mmap_sem
205	 * held in write mode.
206	 */
207	vma->vm_flags = newflags;
208	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
209					  vm_get_page_prot(newflags));
210
211	if (vma_wants_writenotify(vma)) {
212		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
213		dirty_accountable = 1;
214	}
215
216	mmu_notifier_invalidate_range_start(mm, start, end);
217	if (is_vm_hugetlb_page(vma))
218		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
219	else
220		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
221	mmu_notifier_invalidate_range_end(mm, start, end);
222	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
223	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
224	perf_event_mmap(vma);
225	return 0;
226
227fail:
228	vm_unacct_memory(charged);
229	return error;
230}
231
232SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
233		unsigned long, prot)
234{
235	unsigned long vm_flags, nstart, end, tmp, reqprot;
236	struct vm_area_struct *vma, *prev;
237	int error = -EINVAL;
238	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
239	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
240	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
241		return -EINVAL;
242
243	if (start & ~PAGE_MASK)
244		return -EINVAL;
245	if (!len)
246		return 0;
247	len = PAGE_ALIGN(len);
248	end = start + len;
249	if (end <= start)
250		return -ENOMEM;
251	if (!arch_validate_prot(prot))
252		return -EINVAL;
253
254	reqprot = prot;
255	/*
256	 * Does the application expect PROT_READ to imply PROT_EXEC:
257	 */
258	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
259		prot |= PROT_EXEC;
260
261	vm_flags = calc_vm_prot_bits(prot);
262
263	down_write(&current->mm->mmap_sem);
264
265	vma = find_vma_prev(current->mm, start, &prev);
266	error = -ENOMEM;
267	if (!vma)
268		goto out;
269	if (unlikely(grows & PROT_GROWSDOWN)) {
270		if (vma->vm_start >= end)
271			goto out;
272		start = vma->vm_start;
273		error = -EINVAL;
274		if (!(vma->vm_flags & VM_GROWSDOWN))
275			goto out;
276	}
277	else {
278		if (vma->vm_start > start)
279			goto out;
280		if (unlikely(grows & PROT_GROWSUP)) {
281			end = vma->vm_end;
282			error = -EINVAL;
283			if (!(vma->vm_flags & VM_GROWSUP))
284				goto out;
285		}
286	}
287	if (start > vma->vm_start)
288		prev = vma;
289
290	for (nstart = start ; ; ) {
291		unsigned long newflags;
292
293		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
294
295		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
296
297		/* newflags >> 4 shift VM_MAY% in place of VM_% */
298		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
299			error = -EACCES;
300			goto out;
301		}
302
303		error = security_file_mprotect(vma, reqprot, prot);
304		if (error)
305			goto out;
306
307		tmp = vma->vm_end;
308		if (tmp > end)
309			tmp = end;
310		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
311		if (error)
312			goto out;
313		nstart = tmp;
314
315		if (nstart < prev->vm_end)
316			nstart = prev->vm_end;
317		if (nstart >= end)
318			goto out;
319
320		vma = prev->vm_next;
321		if (!vma || vma->vm_start != nstart) {
322			error = -ENOMEM;
323			goto out;
324		}
325	}
326out:
327	up_write(&current->mm->mmap_sem);
328	return error;
329}
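
Both listings implement the mprotect(2) entry point that userspace reaches through the C library wrapper. As a point of reference, here is a minimal userspace sketch (not kernel code) of the path SYSCALL_DEFINE3(mprotect, ...) above services: map an anonymous read/write page, then drop the write permission. The message string, single-page size, and reduced error handling are illustration choices, not anything taken from the file.

/*
 * Minimal userspace sketch: exercise mprotect(2) against one page.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	strcpy(p, "hello");			/* writable while VM_WRITE is set */

	if (mprotect(p, page, PROT_READ)) {	/* enters sys_mprotect() above */
		perror("mprotect");
		return EXIT_FAILURE;
	}

	printf("still readable: %s\n", p);	/* reads keep working */
	/* *p = 'x'; would now fault with SIGSEGV */

	munmap(p, page);
	return 0;
}

After the call returns, mprotect_fixup() has cleared VM_WRITE on the VMA and change_protection() has rewritten the PTEs, so further writes to p fault while reads succeed.
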
mm/mprotect.c, kernel v6.13.7:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  mm/mprotect.c
  4 *
  5 *  (C) Copyright 1994 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 *
  8 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  9 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 10 */
 11
 12#include <linux/pagewalk.h>
 13#include <linux/hugetlb.h>
 14#include <linux/shm.h>
 15#include <linux/mman.h>
 16#include <linux/fs.h>
 17#include <linux/highmem.h>
 18#include <linux/security.h>
 19#include <linux/mempolicy.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/swap.h>
 23#include <linux/swapops.h>
 24#include <linux/mmu_notifier.h>
 25#include <linux/migrate.h>
 26#include <linux/perf_event.h>
 27#include <linux/pkeys.h>
 28#include <linux/ksm.h>
 29#include <linux/uaccess.h>
 30#include <linux/mm_inline.h>
 31#include <linux/pgtable.h>
 32#include <linux/sched/sysctl.h>
 33#include <linux/userfaultfd_k.h>
 34#include <linux/memory-tiers.h>
 35#include <uapi/linux/mman.h>
 36#include <asm/cacheflush.h>
 37#include <asm/mmu_context.h>
 38#include <asm/tlbflush.h>
 39#include <asm/tlb.h>
 40
 41#include "internal.h"
 42
 43bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 44			     pte_t pte)
 45{
 46	struct page *page;
 47
 48	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
 49		return false;
 50
 51	/* Don't touch entries that are not even readable. */
 52	if (pte_protnone(pte))
 53		return false;
 54
 55	/* Do we need write faults for softdirty tracking? */
 56	if (pte_needs_soft_dirty_wp(vma, pte))
 57		return false;
 58
 59	/* Do we need write faults for uffd-wp tracking? */
 60	if (userfaultfd_pte_wp(vma, pte))
 61		return false;
 62
 63	if (!(vma->vm_flags & VM_SHARED)) {
 64		/*
 65		 * Writable MAP_PRIVATE mapping: We can only special-case on
 66		 * exclusive anonymous pages, because we know that our
 67		 * write-fault handler similarly would map them writable without
 68		 * any additional checks while holding the PT lock.
 69		 */
 70		page = vm_normal_page(vma, addr, pte);
 71		return page && PageAnon(page) && PageAnonExclusive(page);
 72	}
 73
 74	VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte));
 75
 76	/*
 77	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
 78	 * needs a real write-fault for writenotify
 79	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
 80	 * FS was already notified and we can simply mark the PTE writable
 81	 * just like the write-fault handler would do.
 82	 */
 83	return pte_dirty(pte);
 84}
 85
 86static long change_pte_range(struct mmu_gather *tlb,
 87		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
 88		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 89{
 90	pte_t *pte, oldpte;
 91	spinlock_t *ptl;
 92	long pages = 0;
 93	int target_node = NUMA_NO_NODE;
 94	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 95	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 96	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 97
 98	tlb_change_page_size(tlb, PAGE_SIZE);
 99	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
100	if (!pte)
101		return -EAGAIN;
102
103	/* Get target node for single threaded private VMAs */
104	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
105	    atomic_read(&vma->vm_mm->mm_users) == 1)
106		target_node = numa_node_id();
107
108	flush_tlb_batched_pending(vma->vm_mm);
109	arch_enter_lazy_mmu_mode();
110	do {
111		oldpte = ptep_get(pte);
112		if (pte_present(oldpte)) {
113			pte_t ptent;
114
115			/*
116			 * Avoid trapping faults against the zero or KSM
117			 * pages. See similar comment in change_huge_pmd.
118			 */
119			if (prot_numa) {
120				struct folio *folio;
121				int nid;
122				bool toptier;
123
124				/* Avoid TLB flush if possible */
125				if (pte_protnone(oldpte))
126					continue;
127
128				folio = vm_normal_folio(vma, addr, oldpte);
129				if (!folio || folio_is_zone_device(folio) ||
130				    folio_test_ksm(folio))
131					continue;
132
133				/* Also skip shared copy-on-write pages */
134				if (is_cow_mapping(vma->vm_flags) &&
135				    (folio_maybe_dma_pinned(folio) ||
136				     folio_likely_mapped_shared(folio)))
137					continue;
138
139				/*
140				 * While migration can move some dirty pages,
141				 * it cannot move them all from MIGRATE_ASYNC
142				 * context.
143				 */
144				if (folio_is_file_lru(folio) &&
145				    folio_test_dirty(folio))
146					continue;
147
148				/*
149				 * Don't mess with PTEs if page is already on the node
150				 * a single-threaded process is running on.
151				 */
152				nid = folio_nid(folio);
153				if (target_node == nid)
154					continue;
155				toptier = node_is_toptier(nid);
156
157				/*
158				 * Skip scanning top tier node if normal numa
159				 * balancing is disabled
160				 */
161				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
162				    toptier)
163					continue;
164				if (folio_use_access_time(folio))
165					folio_xchg_access_time(folio,
166						jiffies_to_msecs(jiffies));
167			}
168
169			oldpte = ptep_modify_prot_start(vma, addr, pte);
170			ptent = pte_modify(oldpte, newprot);
171
172			if (uffd_wp)
173				ptent = pte_mkuffd_wp(ptent);
174			else if (uffd_wp_resolve)
175				ptent = pte_clear_uffd_wp(ptent);
176
177			/*
178			 * In some writable, shared mappings, we might want
179			 * to catch actual write access -- see
180			 * vma_wants_writenotify().
181			 *
182			 * In all writable, private mappings, we have to
183			 * properly handle COW.
184			 *
185			 * In both cases, we can sometimes still change PTEs
186			 * writable and avoid the write-fault handler, for
187			 * example, if a PTE is already dirty and no other
188			 * COW or special handling is required.
189			 */
190			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
191			    !pte_write(ptent) &&
192			    can_change_pte_writable(vma, addr, ptent))
193				ptent = pte_mkwrite(ptent, vma);
194
195			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
196			if (pte_needs_flush(oldpte, ptent))
197				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
198			pages++;
199		} else if (is_swap_pte(oldpte)) {
200			swp_entry_t entry = pte_to_swp_entry(oldpte);
201			pte_t newpte;
202
203			if (is_writable_migration_entry(entry)) {
204				struct folio *folio = pfn_swap_entry_folio(entry);
205
206				/*
207				 * A protection check is difficult so
208				 * just be safe and disable write
209				 */
210				if (folio_test_anon(folio))
211					entry = make_readable_exclusive_migration_entry(
212							     swp_offset(entry));
213				else
214					entry = make_readable_migration_entry(swp_offset(entry));
215				newpte = swp_entry_to_pte(entry);
216				if (pte_swp_soft_dirty(oldpte))
217					newpte = pte_swp_mksoft_dirty(newpte);
218			} else if (is_writable_device_private_entry(entry)) {
219				/*
220				 * We do not preserve soft-dirtiness. See
221				 * copy_nonpresent_pte() for explanation.
222				 */
223				entry = make_readable_device_private_entry(
224							swp_offset(entry));
225				newpte = swp_entry_to_pte(entry);
226				if (pte_swp_uffd_wp(oldpte))
227					newpte = pte_swp_mkuffd_wp(newpte);
228			} else if (is_writable_device_exclusive_entry(entry)) {
229				entry = make_readable_device_exclusive_entry(
230							swp_offset(entry));
231				newpte = swp_entry_to_pte(entry);
232				if (pte_swp_soft_dirty(oldpte))
233					newpte = pte_swp_mksoft_dirty(newpte);
234				if (pte_swp_uffd_wp(oldpte))
235					newpte = pte_swp_mkuffd_wp(newpte);
236			} else if (is_pte_marker_entry(entry)) {
237				/*
238				 * Ignore error swap entries unconditionally,
239				 * because any access should sigbus/sigsegv
240				 * anyway.
241				 */
242				if (is_poisoned_swp_entry(entry) ||
243				    is_guard_swp_entry(entry))
244					continue;
245				/*
246				 * If this is uffd-wp pte marker and we'd like
247				 * to unprotect it, drop it; the next page
248				 * fault will trigger without uffd trapping.
249				 */
250				if (uffd_wp_resolve) {
251					pte_clear(vma->vm_mm, addr, pte);
252					pages++;
253				}
254				continue;
255			} else {
256				newpte = oldpte;
257			}
258
259			if (uffd_wp)
260				newpte = pte_swp_mkuffd_wp(newpte);
261			else if (uffd_wp_resolve)
262				newpte = pte_swp_clear_uffd_wp(newpte);
263
264			if (!pte_same(oldpte, newpte)) {
265				set_pte_at(vma->vm_mm, addr, pte, newpte);
266				pages++;
267			}
268		} else {
269			/* It must be an none page, or what else?.. */
270			WARN_ON_ONCE(!pte_none(oldpte));
271
272			/*
273			 * Nobody plays with any none ptes besides
274			 * userfaultfd when applying the protections.
275			 */
276			if (likely(!uffd_wp))
277				continue;
278
279			if (userfaultfd_wp_use_markers(vma)) {
280				/*
281				 * For file-backed mem, we need to be able to
282				 * wr-protect a none pte, because even if the
283				 * pte is none, the page/swap cache could
284				 * exist.  Doing that by install a marker.
285				 */
286				set_pte_at(vma->vm_mm, addr, pte,
287					   make_pte_marker(PTE_MARKER_UFFD_WP));
288				pages++;
289			}
290		}
291	} while (pte++, addr += PAGE_SIZE, addr != end);
292	arch_leave_lazy_mmu_mode();
293	pte_unmap_unlock(pte - 1, ptl);
294
295	return pages;
296}
297
298/*
299 * Return true if we want to split THPs into PTE mappings in change
300 * protection procedure, false otherwise.
301 */
302static inline bool
303pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
304{
305	/*
306	 * pte markers only resides in pte level, if we need pte markers,
307	 * we need to split.  For example, we cannot wr-protect a file thp
308	 * (e.g. 2M shmem) because file thp is handled differently when
309	 * split by erasing the pmd so far.
310	 */
311	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
312}
313
314/*
315 * Return true if we want to populate pgtables in change protection
316 * procedure, false otherwise
317 */
318static inline bool
319pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
320{
321	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
322	if (!(cp_flags & MM_CP_UFFD_WP))
323		return false;
324
325	/* Populate if the userfaultfd mode requires pte markers */
326	return userfaultfd_wp_use_markers(vma);
327}
328
329/*
330 * Populate the pgtable underneath for whatever reason if requested.
331 * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable
332 * allocation failures during page faults by kicking OOM and returning
333 * error.
334 */
335#define  change_pmd_prepare(vma, pmd, cp_flags)				\
336	({								\
337		long err = 0;						\
338		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
339			if (pte_alloc(vma->vm_mm, pmd))			\
340				err = -ENOMEM;				\
341		}							\
342		err;							\
343	})
344
345/*
346 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
347 * have separate change_pmd_prepare() because pte_alloc() returns 0 on success,
348 * while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
349 */
350#define  change_prepare(vma, high, low, addr, cp_flags)			\
351	  ({								\
352		long err = 0;						\
353		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
354			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
355			if (p == NULL)					\
356				err = -ENOMEM;				\
357		}							\
358		err;							\
359	})
360
361static inline long change_pmd_range(struct mmu_gather *tlb,
362		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
363		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
364{
365	pmd_t *pmd;
366	unsigned long next;
367	long pages = 0;
368	unsigned long nr_huge_updates = 0;
369
370	pmd = pmd_offset(pud, addr);
371	do {
372		long ret;
373		pmd_t _pmd;
374again:
375		next = pmd_addr_end(addr, end);
376
377		ret = change_pmd_prepare(vma, pmd, cp_flags);
378		if (ret) {
379			pages = ret;
380			break;
381		}
382
383		if (pmd_none(*pmd))
384			goto next;
385
386		_pmd = pmdp_get_lockless(pmd);
387		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
388			if ((next - addr != HPAGE_PMD_SIZE) ||
389			    pgtable_split_needed(vma, cp_flags)) {
390				__split_huge_pmd(vma, pmd, addr, false, NULL);
391				/*
392				 * For file-backed, the pmd could have been
393				 * cleared; make sure pmd populated if
394				 * necessary, then fall-through to pte level.
395				 */
396				ret = change_pmd_prepare(vma, pmd, cp_flags);
397				if (ret) {
398					pages = ret;
399					break;
400				}
401			} else {
402				ret = change_huge_pmd(tlb, vma, pmd,
403						addr, newprot, cp_flags);
404				if (ret) {
405					if (ret == HPAGE_PMD_NR) {
406						pages += HPAGE_PMD_NR;
407						nr_huge_updates++;
408					}
409
410					/* huge pmd was handled */
411					goto next;
412				}
413			}
414			/* fall through, the trans huge pmd just split */
415		}
416
417		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
418				       cp_flags);
419		if (ret < 0)
420			goto again;
421		pages += ret;
422next:
423		cond_resched();
424	} while (pmd++, addr = next, addr != end);
425
426	if (nr_huge_updates)
427		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
428	return pages;
429}
430
431static inline long change_pud_range(struct mmu_gather *tlb,
432		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
433		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
434{
435	struct mmu_notifier_range range;
436	pud_t *pudp, pud;
437	unsigned long next;
438	long pages = 0, ret;
439
440	range.start = 0;
441
442	pudp = pud_offset(p4d, addr);
443	do {
444again:
445		next = pud_addr_end(addr, end);
446		ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
447		if (ret) {
448			pages = ret;
449			break;
450		}
451
452		pud = READ_ONCE(*pudp);
453		if (pud_none(pud))
454			continue;
455
456		if (!range.start) {
457			mmu_notifier_range_init(&range,
458						MMU_NOTIFY_PROTECTION_VMA, 0,
459						vma->vm_mm, addr, end);
460			mmu_notifier_invalidate_range_start(&range);
461		}
462
463		if (pud_leaf(pud)) {
464			if ((next - addr != PUD_SIZE) ||
465			    pgtable_split_needed(vma, cp_flags)) {
466				__split_huge_pud(vma, pudp, addr);
467				goto again;
468			} else {
469				ret = change_huge_pud(tlb, vma, pudp,
470						      addr, newprot, cp_flags);
471				if (ret == 0)
472					goto again;
473				/* huge pud was handled */
474				if (ret == HPAGE_PUD_NR)
475					pages += HPAGE_PUD_NR;
476				continue;
477			}
478		}
479
480		pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
481					  cp_flags);
482	} while (pudp++, addr = next, addr != end);
483
484	if (range.start)
485		mmu_notifier_invalidate_range_end(&range);
486
487	return pages;
488}
489
490static inline long change_p4d_range(struct mmu_gather *tlb,
491		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
492		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
493{
494	p4d_t *p4d;
495	unsigned long next;
496	long pages = 0, ret;
497
498	p4d = p4d_offset(pgd, addr);
499	do {
500		next = p4d_addr_end(addr, end);
501		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
502		if (ret)
503			return ret;
504		if (p4d_none_or_clear_bad(p4d))
505			continue;
506		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
507					  cp_flags);
508	} while (p4d++, addr = next, addr != end);
509
510	return pages;
511}
512
513static long change_protection_range(struct mmu_gather *tlb,
514		struct vm_area_struct *vma, unsigned long addr,
515		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
516{
517	struct mm_struct *mm = vma->vm_mm;
518	pgd_t *pgd;
519	unsigned long next;
520	long pages = 0, ret;
521
522	BUG_ON(addr >= end);
523	pgd = pgd_offset(mm, addr);
524	tlb_start_vma(tlb, vma);
525	do {
526		next = pgd_addr_end(addr, end);
527		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
528		if (ret) {
529			pages = ret;
530			break;
531		}
532		if (pgd_none_or_clear_bad(pgd))
533			continue;
534		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
535					  cp_flags);
536	} while (pgd++, addr = next, addr != end);
537
538	tlb_end_vma(tlb, vma);
539
540	return pages;
541}
542
543long change_protection(struct mmu_gather *tlb,
544		       struct vm_area_struct *vma, unsigned long start,
545		       unsigned long end, unsigned long cp_flags)
546{
547	pgprot_t newprot = vma->vm_page_prot;
548	long pages;
549
550	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
551
552#ifdef CONFIG_NUMA_BALANCING
553	/*
554	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
555	 * are expected to reflect their requirements via VMA flags such that
556	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
557	 */
558	if (cp_flags & MM_CP_PROT_NUMA)
559		newprot = PAGE_NONE;
560#else
561	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
562#endif
563
564	if (is_vm_hugetlb_page(vma))
565		pages = hugetlb_change_protection(vma, start, end, newprot,
566						  cp_flags);
567	else
568		pages = change_protection_range(tlb, vma, start, end, newprot,
569						cp_flags);
570
571	return pages;
572}
573
574static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
575			       unsigned long next, struct mm_walk *walk)
576{
577	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
578				  *(pgprot_t *)(walk->private)) ?
579		0 : -EACCES;
580}
581
582static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
583				   unsigned long addr, unsigned long next,
584				   struct mm_walk *walk)
585{
586	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
587				  *(pgprot_t *)(walk->private)) ?
588		0 : -EACCES;
589}
590
591static int prot_none_test(unsigned long addr, unsigned long next,
592			  struct mm_walk *walk)
593{
594	return 0;
595}
596
597static const struct mm_walk_ops prot_none_walk_ops = {
598	.pte_entry		= prot_none_pte_entry,
599	.hugetlb_entry		= prot_none_hugetlb_entry,
600	.test_walk		= prot_none_test,
601	.walk_lock		= PGWALK_WRLOCK,
602};
603
604int
605mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
606	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
607	       unsigned long start, unsigned long end, unsigned long newflags)
608{
609	struct mm_struct *mm = vma->vm_mm;
610	unsigned long oldflags = vma->vm_flags;
611	long nrpages = (end - start) >> PAGE_SHIFT;
612	unsigned int mm_cp_flags = 0;
613	unsigned long charged = 0;
614	int error;
615
616	if (!can_modify_vma(vma))
617		return -EPERM;
618
619	if (newflags == oldflags) {
620		*pprev = vma;
621		return 0;
622	}
623
624	/*
625	 * Do PROT_NONE PFN permission checks here when we can still
626	 * bail out without undoing a lot of state. This is a rather
627	 * uncommon case, so doesn't need to be very optimized.
628	 */
629	if (arch_has_pfn_modify_check() &&
630	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
631	    (newflags & VM_ACCESS_FLAGS) == 0) {
632		pgprot_t new_pgprot = vm_get_page_prot(newflags);
633
634		error = walk_page_range(current->mm, start, end,
635				&prot_none_walk_ops, &new_pgprot);
636		if (error)
637			return error;
638	}
639
640	/*
641	 * If we make a private mapping writable we increase our commit;
642	 * but (without finer accounting) cannot reduce our commit if we
643	 * make it unwritable again except in the anonymous case where no
644	 * anon_vma has yet to be assigned.
645	 *
646	 * hugetlb mapping were accounted for even if read-only so there is
647	 * no need to account for them here.
648	 */
649	if (newflags & VM_WRITE) {
650		/* Check space limits when area turns into data. */
651		if (!may_expand_vm(mm, newflags, nrpages) &&
652				may_expand_vm(mm, oldflags, nrpages))
653			return -ENOMEM;
654		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
655						VM_SHARED|VM_NORESERVE))) {
656			charged = nrpages;
657			if (security_vm_enough_memory_mm(mm, charged))
658				return -ENOMEM;
659			newflags |= VM_ACCOUNT;
660		}
661	} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
662		   !vma->anon_vma) {
663		newflags &= ~VM_ACCOUNT;
664	}
665
666	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
667	if (IS_ERR(vma)) {
668		error = PTR_ERR(vma);
669		goto fail;
670	}
671
672	*pprev = vma;
673
674	/*
675	 * vm_flags and vm_page_prot are protected by the mmap_lock
676	 * held in write mode.
677	 */
678	vma_start_write(vma);
679	vm_flags_reset(vma, newflags);
680	if (vma_wants_manual_pte_write_upgrade(vma))
681		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
682	vma_set_page_prot(vma);
683
684	change_protection(tlb, vma, start, end, mm_cp_flags);
685
686	if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
687		vm_unacct_memory(nrpages);
688
689	/*
690	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
691	 * fault on access.
692	 */
693	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
694			(newflags & VM_WRITE)) {
695		populate_vma_page_range(vma, start, end, NULL);
696	}
697
698	vm_stat_account(mm, oldflags, -nrpages);
699	vm_stat_account(mm, newflags, nrpages);
700	perf_event_mmap(vma);
701	return 0;
702
703fail:
704	vm_unacct_memory(charged);
705	return error;
706}
707
708/*
709 * pkey==-1 when doing a legacy mprotect()
710 */
711static int do_mprotect_pkey(unsigned long start, size_t len,
712		unsigned long prot, int pkey)
713{
714	unsigned long nstart, end, tmp, reqprot;
715	struct vm_area_struct *vma, *prev;
716	int error;
717	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
718	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
719				(prot & PROT_READ);
720	struct mmu_gather tlb;
721	struct vma_iterator vmi;
722
723	start = untagged_addr(start);
724
725	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
726	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
727		return -EINVAL;
728
729	if (start & ~PAGE_MASK)
730		return -EINVAL;
731	if (!len)
732		return 0;
733	len = PAGE_ALIGN(len);
734	end = start + len;
735	if (end <= start)
736		return -ENOMEM;
737	if (!arch_validate_prot(prot, start))
738		return -EINVAL;
739
740	reqprot = prot;
741
742	if (mmap_write_lock_killable(current->mm))
743		return -EINTR;
744
745	/*
746	 * If userspace did not allocate the pkey, do not let
747	 * them use it here.
748	 */
749	error = -EINVAL;
750	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
751		goto out;
752
753	vma_iter_init(&vmi, current->mm, start);
754	vma = vma_find(&vmi, end);
755	error = -ENOMEM;
756	if (!vma)
757		goto out;
758
759	if (unlikely(grows & PROT_GROWSDOWN)) {
760		if (vma->vm_start >= end)
761			goto out;
762		start = vma->vm_start;
763		error = -EINVAL;
764		if (!(vma->vm_flags & VM_GROWSDOWN))
765			goto out;
766	} else {
767		if (vma->vm_start > start)
768			goto out;
769		if (unlikely(grows & PROT_GROWSUP)) {
770			end = vma->vm_end;
771			error = -EINVAL;
772			if (!(vma->vm_flags & VM_GROWSUP))
773				goto out;
774		}
775	}
776
777	prev = vma_prev(&vmi);
778	if (start > vma->vm_start)
779		prev = vma;
780
781	tlb_gather_mmu(&tlb, current->mm);
782	nstart = start;
783	tmp = vma->vm_start;
784	for_each_vma_range(vmi, vma, end) {
785		unsigned long mask_off_old_flags;
786		unsigned long newflags;
787		int new_vma_pkey;
788
789		if (vma->vm_start != tmp) {
790			error = -ENOMEM;
791			break;
792		}
793
794		/* Does the application expect PROT_READ to imply PROT_EXEC */
795		if (rier && (vma->vm_flags & VM_MAYEXEC))
796			prot |= PROT_EXEC;
797
798		/*
799		 * Each mprotect() call explicitly passes r/w/x permissions.
800		 * If a permission is not passed to mprotect(), it must be
801		 * cleared from the VMA.
802		 */
803		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;
804
805		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
806		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
807		newflags |= (vma->vm_flags & ~mask_off_old_flags);
808
809		/* newflags >> 4 shift VM_MAY% in place of VM_% */
810		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
811			error = -EACCES;
812			break;
813		}
814
815		if (map_deny_write_exec(vma->vm_flags, newflags)) {
816			error = -EACCES;
817			break;
818		}
819
820		/* Allow architectures to sanity-check the new flags */
821		if (!arch_validate_flags(newflags)) {
822			error = -EINVAL;
823			break;
824		}
825
826		error = security_file_mprotect(vma, reqprot, prot);
827		if (error)
828			break;
829
830		tmp = vma->vm_end;
831		if (tmp > end)
832			tmp = end;
833
834		if (vma->vm_ops && vma->vm_ops->mprotect) {
835			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
836			if (error)
837				break;
838		}
839
840		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
841		if (error)
842			break;
843
844		tmp = vma_iter_end(&vmi);
845		nstart = tmp;
846		prot = reqprot;
847	}
848	tlb_finish_mmu(&tlb);
849
850	if (!error && tmp < end)
851		error = -ENOMEM;
852
853out:
854	mmap_write_unlock(current->mm);
855	return error;
856}
857
858SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
859		unsigned long, prot)
860{
861	return do_mprotect_pkey(start, len, prot, -1);
862}
863
864#ifdef CONFIG_ARCH_HAS_PKEYS
865
866SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
867		unsigned long, prot, int, pkey)
868{
869	return do_mprotect_pkey(start, len, prot, pkey);
870}
871
872SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
873{
874	int pkey;
875	int ret;
876
877	/* No flags supported yet. */
878	if (flags)
879		return -EINVAL;
880	/* check for unsupported init values */
881	if (init_val & ~PKEY_ACCESS_MASK)
882		return -EINVAL;
883
884	mmap_write_lock(current->mm);
885	pkey = mm_pkey_alloc(current->mm);
886
887	ret = -ENOSPC;
888	if (pkey == -1)
889		goto out;
890
891	ret = arch_set_user_pkey_access(current, pkey, init_val);
892	if (ret) {
893		mm_pkey_free(current->mm, pkey);
894		goto out;
895	}
896	ret = pkey;
897out:
898	mmap_write_unlock(current->mm);
899	return ret;
900}
901
902SYSCALL_DEFINE1(pkey_free, int, pkey)
903{
904	int ret;
905
906	mmap_write_lock(current->mm);
907	ret = mm_pkey_free(current->mm, pkey);
908	mmap_write_unlock(current->mm);
909
910	/*
911	 * We could provide warnings or errors if any VMA still
912	 * has the pkey set here.
913	 */
914	return ret;
915}
916
917#endif /* CONFIG_ARCH_HAS_PKEYS */
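
For completeness, a userspace sketch of the pkey path defined above, assuming a glibc that exports the pkey_alloc()/pkey_mprotect()/pkey_set()/pkey_free() wrappers (2.27 or later) and hardware memory protection keys; where either is missing, pkey_alloc() fails and the program simply exits. This is an illustration of how the syscalls are reached, not kernel code; the mapping size and the written bytes are arbitrary.

/*
 * Userspace protection-key sketch (assumes glibc >= 2.27 and pkey-capable
 * hardware; pkey_alloc() fails with ENOSYS or ENOSPC otherwise).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	int pkey = pkey_alloc(0, 0);		/* SYSCALL pkey_alloc above */
	if (pkey < 0) {
		perror("pkey_alloc");		/* no kernel/CPU support */
		return EXIT_FAILURE;
	}

	/* Tag the mapping with the key: do_mprotect_pkey(..., pkey). */
	if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return EXIT_FAILURE;
	}

	p[0] = 'a';				/* key rights currently open */

	pkey_set(pkey, PKEY_DISABLE_WRITE);	/* per-thread, no syscall */
	/* p[0] = 'b'; would now take a protection-key fault (SIGSEGV) */
	pkey_set(pkey, 0);			/* restore access */

	pkey_free(pkey);			/* SYSCALL pkey_free above */
	munmap(p, page);
	return 0;
}

The contrast with plain mprotect() is that pkey_set() changes the calling thread's access rights from userspace (on x86, by writing the PKRU register), while pkey_mprotect() still takes mmap_lock and walks the VMAs exactly like do_mprotect_pkey() above.
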