v3.1
 
  1/*
  2 *  mm/mprotect.c
  3 *
  4 *  (C) Copyright 1994 Linus Torvalds
  5 *  (C) Copyright 2002 Christoph Hellwig
  6 *
  7 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  8 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/hugetlb.h>
 13#include <linux/shm.h>
 14#include <linux/mman.h>
 15#include <linux/fs.h>
 16#include <linux/highmem.h>
 17#include <linux/security.h>
 18#include <linux/mempolicy.h>
 19#include <linux/personality.h>
 20#include <linux/syscalls.h>
 21#include <linux/swap.h>
 22#include <linux/swapops.h>
 23#include <linux/mmu_notifier.h>
 24#include <linux/migrate.h>
 25#include <linux/perf_event.h>
 26#include <asm/uaccess.h>
 27#include <asm/pgtable.h>
 28#include <asm/cacheflush.h>
 29#include <asm/tlbflush.h>
 30
 31#ifndef pgprot_modify
 32static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 33{
 34	return newprot;
 35}
 36#endif
 37
 38static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		int dirty_accountable)
 41{
 42	pte_t *pte, oldpte;
 43	spinlock_t *ptl;
 44
 45	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 46	arch_enter_lazy_mmu_mode();
 47	do {
 48		oldpte = *pte;
 49		if (pte_present(oldpte)) {
 50			pte_t ptent;
 51
 52			ptent = ptep_modify_prot_start(mm, addr, pte);
 53			ptent = pte_modify(ptent, newprot);
 54
 55			/*
 56			 * Avoid taking write faults for pages we know to be
 57			 * dirty.
 58			 */
 59			if (dirty_accountable && pte_dirty(ptent))
 60				ptent = pte_mkwrite(ptent);
 61
 62			ptep_modify_prot_commit(mm, addr, pte, ptent);
 63		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
 64			swp_entry_t entry = pte_to_swp_entry(oldpte);
 65
 66			if (is_write_migration_entry(entry)) {
 67				/*
 68				 * A protection check is difficult so
 69				 * just be safe and disable write
 70				 */
 71				make_migration_entry_read(&entry);
 72				set_pte_at(mm, addr, pte,
 73					swp_entry_to_pte(entry));
 74			}
 75		}
 76	} while (pte++, addr += PAGE_SIZE, addr != end);
 77	arch_leave_lazy_mmu_mode();
 78	pte_unmap_unlock(pte - 1, ptl);
 79}
 80
 81static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 82		unsigned long addr, unsigned long end, pgprot_t newprot,
 83		int dirty_accountable)
 84{
 85	pmd_t *pmd;
 86	unsigned long next;
 87
 88	pmd = pmd_offset(pud, addr);
 89	do {
 90		next = pmd_addr_end(addr, end);
 91		if (pmd_trans_huge(*pmd)) {
 92			if (next - addr != HPAGE_PMD_SIZE)
 93				split_huge_page_pmd(vma->vm_mm, pmd);
 94			else if (change_huge_pmd(vma, pmd, addr, newprot))
 95				continue;
 96			/* fall through */
 97		}
 98		if (pmd_none_or_clear_bad(pmd))
 99			continue;
100		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
101				 dirty_accountable);
102	} while (pmd++, addr = next, addr != end);
103}
104
105static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
106		unsigned long addr, unsigned long end, pgprot_t newprot,
107		int dirty_accountable)
108{
109	pud_t *pud;
110	unsigned long next;
111
112	pud = pud_offset(pgd, addr);
113	do {
114		next = pud_addr_end(addr, end);
115		if (pud_none_or_clear_bad(pud))
116			continue;
117		change_pmd_range(vma, pud, addr, next, newprot,
118				 dirty_accountable);
119	} while (pud++, addr = next, addr != end);
120}
121
122static void change_protection(struct vm_area_struct *vma,
123		unsigned long addr, unsigned long end, pgprot_t newprot,
124		int dirty_accountable)
125{
126	struct mm_struct *mm = vma->vm_mm;
127	pgd_t *pgd;
128	unsigned long next;
129	unsigned long start = addr;
130
131	BUG_ON(addr >= end);
132	pgd = pgd_offset(mm, addr);
133	flush_cache_range(vma, addr, end);
134	do {
135		next = pgd_addr_end(addr, end);
136		if (pgd_none_or_clear_bad(pgd))
137			continue;
138		change_pud_range(vma, pgd, addr, next, newprot,
139				 dirty_accountable);
140	} while (pgd++, addr = next, addr != end);
141	flush_tlb_range(vma, start, end);
142}
143
144int
145mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
146	unsigned long start, unsigned long end, unsigned long newflags)
147{
148	struct mm_struct *mm = vma->vm_mm;
149	unsigned long oldflags = vma->vm_flags;
150	long nrpages = (end - start) >> PAGE_SHIFT;
151	unsigned long charged = 0;
152	pgoff_t pgoff;
153	int error;
154	int dirty_accountable = 0;
155
156	if (newflags == oldflags) {
157		*pprev = vma;
158		return 0;
159	}
160
161	/*
162	 * If we make a private mapping writable we increase our commit;
163	 * but (without finer accounting) cannot reduce our commit if we
164	 * make it unwritable again. hugetlb mappings were accounted for
165	 * even if read-only, so there is no need to account for them here
166	 */
167	if (newflags & VM_WRITE) {
168		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
169						VM_SHARED|VM_NORESERVE))) {
170			charged = nrpages;
171			if (security_vm_enough_memory(charged))
172				return -ENOMEM;
173			newflags |= VM_ACCOUNT;
174		}
175	}
176
177	/*
178	 * First try to merge with previous and/or next vma.
179	 */
180	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
181	*pprev = vma_merge(mm, *pprev, start, end, newflags,
182			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
183	if (*pprev) {
184		vma = *pprev;
185		goto success;
186	}
187
188	*pprev = vma;
189
190	if (start != vma->vm_start) {
191		error = split_vma(mm, vma, start, 1);
192		if (error)
193			goto fail;
194	}
195
196	if (end != vma->vm_end) {
197		error = split_vma(mm, vma, end, 0);
198		if (error)
199			goto fail;
200	}
201
202success:
203	/*
204	 * vm_flags and vm_page_prot are protected by the mmap_sem
205	 * held in write mode.
206	 */
207	vma->vm_flags = newflags;
208	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
209					  vm_get_page_prot(newflags));
210
211	if (vma_wants_writenotify(vma)) {
212		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
213		dirty_accountable = 1;
214	}
215
216	mmu_notifier_invalidate_range_start(mm, start, end);
217	if (is_vm_hugetlb_page(vma))
218		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
219	else
220		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
221	mmu_notifier_invalidate_range_end(mm, start, end);
222	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
223	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
224	perf_event_mmap(vma);
225	return 0;
226
227fail:
228	vm_unacct_memory(charged);
229	return error;
230}
231
232SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
233		unsigned long, prot)
234{
235	unsigned long vm_flags, nstart, end, tmp, reqprot;
236	struct vm_area_struct *vma, *prev;
237	int error = -EINVAL;
238	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
239	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
240	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
241		return -EINVAL;
242
243	if (start & ~PAGE_MASK)
244		return -EINVAL;
245	if (!len)
246		return 0;
247	len = PAGE_ALIGN(len);
248	end = start + len;
249	if (end <= start)
250		return -ENOMEM;
251	if (!arch_validate_prot(prot))
252		return -EINVAL;
253
254	reqprot = prot;
255	/*
256	 * Does the application expect PROT_READ to imply PROT_EXEC:
257	 */
258	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
259		prot |= PROT_EXEC;
260
261	vm_flags = calc_vm_prot_bits(prot);
262
263	down_write(&current->mm->mmap_sem);
264
265	vma = find_vma_prev(current->mm, start, &prev);
266	error = -ENOMEM;
267	if (!vma)
268		goto out;
269	if (unlikely(grows & PROT_GROWSDOWN)) {
270		if (vma->vm_start >= end)
271			goto out;
272		start = vma->vm_start;
273		error = -EINVAL;
274		if (!(vma->vm_flags & VM_GROWSDOWN))
275			goto out;
276	}
277	else {
278		if (vma->vm_start > start)
279			goto out;
280		if (unlikely(grows & PROT_GROWSUP)) {
281			end = vma->vm_end;
282			error = -EINVAL;
283			if (!(vma->vm_flags & VM_GROWSUP))
284				goto out;
285		}
286	}
287	if (start > vma->vm_start)
288		prev = vma;
289
290	for (nstart = start ; ; ) {
291		unsigned long newflags;
292
293		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
294
295		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
296
297		/* newflags >> 4 shift VM_MAY% in place of VM_% */
298		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
299			error = -EACCES;
300			goto out;
301		}
302
303		error = security_file_mprotect(vma, reqprot, prot);
304		if (error)
305			goto out;
306
307		tmp = vma->vm_end;
308		if (tmp > end)
309			tmp = end;
310		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
311		if (error)
312			goto out;
313		nstart = tmp;
314
315		if (nstart < prev->vm_end)
316			nstart = prev->vm_end;
317		if (nstart >= end)
318			goto out;
319
320		vma = prev->vm_next;
321		if (!vma || vma->vm_start != nstart) {
322			error = -ENOMEM;
323			goto out;
324		}
325	}
326out:
327	up_write(&current->mm->mmap_sem);
328	return error;
329}
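
A minimal userspace sketch (not part of the kernel file above) of the constraints the sys_mprotect() entry point enforces: the start address must be page aligned, the length is rounded up to whole pages, and a request that exceeds the VMA's VM_MAY* bits fails with EACCES. Only standard libc wrappers are used; buffer sizes are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	/* One private, anonymous, read/write page. */
	void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Page-aligned start: the write permission is dropped in place. */
	if (mprotect(buf, page, PROT_READ) != 0) {
		perror("mprotect");
		return EXIT_FAILURE;
	}

	/* Unaligned start: rejected with EINVAL before any VMA is touched. */
	if (mprotect((char *)buf + 1, page - 1, PROT_READ) != 0)
		printf("unaligned mprotect: %s (expected)\n", strerror(errno));

	munmap(buf, page);
	return EXIT_SUCCESS;
}
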
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  mm/mprotect.c
  4 *
  5 *  (C) Copyright 1994 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 *
  8 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  9 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 10 */
 11
 12#include <linux/pagewalk.h>
 13#include <linux/hugetlb.h>
 14#include <linux/shm.h>
 15#include <linux/mman.h>
 16#include <linux/fs.h>
 17#include <linux/highmem.h>
 18#include <linux/security.h>
 19#include <linux/mempolicy.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/swap.h>
 23#include <linux/swapops.h>
 24#include <linux/mmu_notifier.h>
 25#include <linux/migrate.h>
 26#include <linux/perf_event.h>
 27#include <linux/pkeys.h>
 28#include <linux/ksm.h>
 29#include <linux/uaccess.h>
 30#include <linux/mm_inline.h>
 31#include <linux/pgtable.h>
 32#include <asm/cacheflush.h>
 33#include <asm/mmu_context.h>
 34#include <asm/tlbflush.h>
 35
 36#include "internal.h"
 37
 38static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		unsigned long cp_flags)
 41{
 42	pte_t *pte, oldpte;
 43	spinlock_t *ptl;
 44	unsigned long pages = 0;
 45	int target_node = NUMA_NO_NODE;
 46	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
 47	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 48	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 49	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 50
 51	/*
 52	 * Can be called with only the mmap_lock for reading by
 53	 * prot_numa so we must check the pmd isn't constantly
 54	 * changing from under us from pmd_none to pmd_trans_huge
 55	 * and/or the other way around.
 56	 */
 57	if (pmd_trans_unstable(pmd))
 58		return 0;
 59
 60	/*
 61	 * The pmd points to a regular pte so the pmd can't change
 62	 * from under us even if the mmap_lock is only held for
 63	 * reading.
 64	 */
 65	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 66
 67	/* Get target node for single threaded private VMAs */
 68	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
 69	    atomic_read(&vma->vm_mm->mm_users) == 1)
 70		target_node = numa_node_id();
 71
 72	flush_tlb_batched_pending(vma->vm_mm);
 73	arch_enter_lazy_mmu_mode();
 74	do {
 75		oldpte = *pte;
 76		if (pte_present(oldpte)) {
 77			pte_t ptent;
 78			bool preserve_write = prot_numa && pte_write(oldpte);
 79
 80			/*
 81			 * Avoid trapping faults against the zero or KSM
 82			 * pages. See similar comment in change_huge_pmd.
 83			 */
 84			if (prot_numa) {
 85				struct page *page;
 86
 87				/* Avoid TLB flush if possible */
 88				if (pte_protnone(oldpte))
 89					continue;
 90
 91				page = vm_normal_page(vma, addr, oldpte);
 92				if (!page || PageKsm(page))
 93					continue;
 94
 95				/* Also skip shared copy-on-write pages */
 96				if (is_cow_mapping(vma->vm_flags) &&
 97				    page_mapcount(page) != 1)
 98					continue;
 99
100				/*
101				 * While migration can move some dirty pages,
102				 * it cannot move them all from MIGRATE_ASYNC
103				 * context.
104				 */
105				if (page_is_file_lru(page) && PageDirty(page))
106					continue;
107
108				/*
109				 * Don't mess with PTEs if page is already on the node
110				 * a single-threaded process is running on.
111				 */
112				if (target_node == page_to_nid(page))
113					continue;
114			}
115
116			oldpte = ptep_modify_prot_start(vma, addr, pte);
117			ptent = pte_modify(oldpte, newprot);
118			if (preserve_write)
119				ptent = pte_mk_savedwrite(ptent);
120
121			if (uffd_wp) {
122				ptent = pte_wrprotect(ptent);
123				ptent = pte_mkuffd_wp(ptent);
124			} else if (uffd_wp_resolve) {
125				/*
126				 * Leave the write bit to be handled
127				 * by PF interrupt handler, then
128				 * things like COW could be properly
129				 * handled.
130				 */
131				ptent = pte_clear_uffd_wp(ptent);
132			}
133
134			/* Avoid taking write faults for known dirty pages */
135			if (dirty_accountable && pte_dirty(ptent) &&
136					(pte_soft_dirty(ptent) ||
137					 !(vma->vm_flags & VM_SOFTDIRTY))) {
138				ptent = pte_mkwrite(ptent);
139			}
140			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
141			pages++;
142		} else if (is_swap_pte(oldpte)) {
143			swp_entry_t entry = pte_to_swp_entry(oldpte);
144			pte_t newpte;
145
146			if (is_writable_migration_entry(entry)) {
147				/*
148				 * A protection check is difficult so
149				 * just be safe and disable write
150				 */
151				entry = make_readable_migration_entry(
152							swp_offset(entry));
153				newpte = swp_entry_to_pte(entry);
154				if (pte_swp_soft_dirty(oldpte))
155					newpte = pte_swp_mksoft_dirty(newpte);
156				if (pte_swp_uffd_wp(oldpte))
157					newpte = pte_swp_mkuffd_wp(newpte);
158			} else if (is_writable_device_private_entry(entry)) {
159				/*
160				 * We do not preserve soft-dirtiness. See
161				 * copy_one_pte() for explanation.
162				 */
163				entry = make_readable_device_private_entry(
164							swp_offset(entry));
165				newpte = swp_entry_to_pte(entry);
166				if (pte_swp_uffd_wp(oldpte))
167					newpte = pte_swp_mkuffd_wp(newpte);
168			} else if (is_writable_device_exclusive_entry(entry)) {
169				entry = make_readable_device_exclusive_entry(
170							swp_offset(entry));
171				newpte = swp_entry_to_pte(entry);
172				if (pte_swp_soft_dirty(oldpte))
173					newpte = pte_swp_mksoft_dirty(newpte);
174				if (pte_swp_uffd_wp(oldpte))
175					newpte = pte_swp_mkuffd_wp(newpte);
176			} else {
177				newpte = oldpte;
178			}
179
180			if (uffd_wp)
181				newpte = pte_swp_mkuffd_wp(newpte);
182			else if (uffd_wp_resolve)
183				newpte = pte_swp_clear_uffd_wp(newpte);
184
185			if (!pte_same(oldpte, newpte)) {
186				set_pte_at(vma->vm_mm, addr, pte, newpte);
187				pages++;
188			}
189		}
190	} while (pte++, addr += PAGE_SIZE, addr != end);
191	arch_leave_lazy_mmu_mode();
192	pte_unmap_unlock(pte - 1, ptl);
193
194	return pages;
195}
196
197/*
198 * Used when setting automatic NUMA hinting protection where it is
199 * critical that a numa hinting PMD is not confused with a bad PMD.
200 */
201static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
202{
203	pmd_t pmdval = pmd_read_atomic(pmd);
204
205	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
206#ifdef CONFIG_TRANSPARENT_HUGEPAGE
207	barrier();
208#endif
209
210	if (pmd_none(pmdval))
211		return 1;
212	if (pmd_trans_huge(pmdval))
213		return 0;
214	if (unlikely(pmd_bad(pmdval))) {
215		pmd_clear_bad(pmd);
216		return 1;
217	}
218
219	return 0;
220}
221
222static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
223		pud_t *pud, unsigned long addr, unsigned long end,
224		pgprot_t newprot, unsigned long cp_flags)
225{
226	pmd_t *pmd;
227	unsigned long next;
228	unsigned long pages = 0;
229	unsigned long nr_huge_updates = 0;
230	struct mmu_notifier_range range;
231
232	range.start = 0;
233
234	pmd = pmd_offset(pud, addr);
235	do {
236		unsigned long this_pages;
237
238		next = pmd_addr_end(addr, end);
239
240		/*
241		 * Automatic NUMA balancing walks the tables with mmap_lock
242	 * held for read. It's possible for a parallel update to occur
243		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
244		 * check leading to a false positive and clearing.
245		 * Hence, it's necessary to atomically read the PMD value
246		 * for all the checks.
247		 */
248		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
249		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
250			goto next;
251
252		/* invoke the mmu notifier if the pmd is populated */
253		if (!range.start) {
254			mmu_notifier_range_init(&range,
255				MMU_NOTIFY_PROTECTION_VMA, 0,
256				vma, vma->vm_mm, addr, end);
257			mmu_notifier_invalidate_range_start(&range);
258		}
259
260		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
261			if (next - addr != HPAGE_PMD_SIZE) {
262				__split_huge_pmd(vma, pmd, addr, false, NULL);
263			} else {
264				int nr_ptes = change_huge_pmd(vma, pmd, addr,
265							      newprot, cp_flags);
266
267				if (nr_ptes) {
268					if (nr_ptes == HPAGE_PMD_NR) {
269						pages += HPAGE_PMD_NR;
270						nr_huge_updates++;
271					}
272
273					/* huge pmd was handled */
274					goto next;
275				}
276			}
277			/* fall through, the trans huge pmd just split */
278		}
279		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
280					      cp_flags);
281		pages += this_pages;
282next:
283		cond_resched();
284	} while (pmd++, addr = next, addr != end);
285
286	if (range.start)
287		mmu_notifier_invalidate_range_end(&range);
288
289	if (nr_huge_updates)
290		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
291	return pages;
292}
293
294static inline unsigned long change_pud_range(struct vm_area_struct *vma,
295		p4d_t *p4d, unsigned long addr, unsigned long end,
296		pgprot_t newprot, unsigned long cp_flags)
297{
298	pud_t *pud;
299	unsigned long next;
300	unsigned long pages = 0;
301
302	pud = pud_offset(p4d, addr);
303	do {
304		next = pud_addr_end(addr, end);
305		if (pud_none_or_clear_bad(pud))
306			continue;
307		pages += change_pmd_range(vma, pud, addr, next, newprot,
308					  cp_flags);
309	} while (pud++, addr = next, addr != end);
310
311	return pages;
312}
313
314static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
315		pgd_t *pgd, unsigned long addr, unsigned long end,
316		pgprot_t newprot, unsigned long cp_flags)
317{
318	p4d_t *p4d;
319	unsigned long next;
320	unsigned long pages = 0;
321
322	p4d = p4d_offset(pgd, addr);
323	do {
324		next = p4d_addr_end(addr, end);
325		if (p4d_none_or_clear_bad(p4d))
326			continue;
327		pages += change_pud_range(vma, p4d, addr, next, newprot,
328					  cp_flags);
329	} while (p4d++, addr = next, addr != end);
330
331	return pages;
332}
333
334static unsigned long change_protection_range(struct vm_area_struct *vma,
335		unsigned long addr, unsigned long end, pgprot_t newprot,
336		unsigned long cp_flags)
337{
338	struct mm_struct *mm = vma->vm_mm;
339	pgd_t *pgd;
340	unsigned long next;
341	unsigned long start = addr;
342	unsigned long pages = 0;
343
344	BUG_ON(addr >= end);
345	pgd = pgd_offset(mm, addr);
346	flush_cache_range(vma, addr, end);
347	inc_tlb_flush_pending(mm);
348	do {
349		next = pgd_addr_end(addr, end);
350		if (pgd_none_or_clear_bad(pgd))
351			continue;
352		pages += change_p4d_range(vma, pgd, addr, next, newprot,
353					  cp_flags);
354	} while (pgd++, addr = next, addr != end);
355
356	/* Only flush the TLB if we actually modified any entries: */
357	if (pages)
358		flush_tlb_range(vma, start, end);
359	dec_tlb_flush_pending(mm);
360
361	return pages;
362}
363
364unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
365		       unsigned long end, pgprot_t newprot,
366		       unsigned long cp_flags)
367{
368	unsigned long pages;
369
370	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
371
372	if (is_vm_hugetlb_page(vma))
373		pages = hugetlb_change_protection(vma, start, end, newprot);
374	else
375		pages = change_protection_range(vma, start, end, newprot,
376						cp_flags);
377
378	return pages;
379}
380
381static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
382			       unsigned long next, struct mm_walk *walk)
383{
384	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
385		0 : -EACCES;
386}
387
388static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
389				   unsigned long addr, unsigned long next,
390				   struct mm_walk *walk)
391{
392	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
393		0 : -EACCES;
394}
395
396static int prot_none_test(unsigned long addr, unsigned long next,
397			  struct mm_walk *walk)
398{
399	return 0;
400}
401
402static const struct mm_walk_ops prot_none_walk_ops = {
403	.pte_entry		= prot_none_pte_entry,
404	.hugetlb_entry		= prot_none_hugetlb_entry,
405	.test_walk		= prot_none_test,
406};
407
408int
409mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
410	unsigned long start, unsigned long end, unsigned long newflags)
411{
412	struct mm_struct *mm = vma->vm_mm;
413	unsigned long oldflags = vma->vm_flags;
414	long nrpages = (end - start) >> PAGE_SHIFT;
415	unsigned long charged = 0;
416	pgoff_t pgoff;
417	int error;
418	int dirty_accountable = 0;
419
420	if (newflags == oldflags) {
421		*pprev = vma;
422		return 0;
423	}
424
425	/*
426	 * Do PROT_NONE PFN permission checks here when we can still
427	 * bail out without undoing a lot of state. This is a rather
428	 * uncommon case, so doesn't need to be very optimized.
429	 */
430	if (arch_has_pfn_modify_check() &&
431	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
432	    (newflags & VM_ACCESS_FLAGS) == 0) {
433		pgprot_t new_pgprot = vm_get_page_prot(newflags);
434
435		error = walk_page_range(current->mm, start, end,
436				&prot_none_walk_ops, &new_pgprot);
437		if (error)
438			return error;
439	}
440
441	/*
442	 * If we make a private mapping writable we increase our commit;
443	 * but (without finer accounting) cannot reduce our commit if we
444	 * make it unwritable again. hugetlb mappings were accounted for
445	 * even if read-only, so there is no need to account for them here
446	 */
447	if (newflags & VM_WRITE) {
448		/* Check space limits when area turns into data. */
449		if (!may_expand_vm(mm, newflags, nrpages) &&
450				may_expand_vm(mm, oldflags, nrpages))
451			return -ENOMEM;
452		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
453						VM_SHARED|VM_NORESERVE))) {
454			charged = nrpages;
455			if (security_vm_enough_memory_mm(mm, charged))
456				return -ENOMEM;
457			newflags |= VM_ACCOUNT;
458		}
459	}
460
461	/*
462	 * First try to merge with previous and/or next vma.
463	 */
464	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
465	*pprev = vma_merge(mm, *pprev, start, end, newflags,
466			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
467			   vma->vm_userfaultfd_ctx);
468	if (*pprev) {
469		vma = *pprev;
470		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
471		goto success;
472	}
473
474	*pprev = vma;
475
476	if (start != vma->vm_start) {
477		error = split_vma(mm, vma, start, 1);
478		if (error)
479			goto fail;
480	}
481
482	if (end != vma->vm_end) {
483		error = split_vma(mm, vma, end, 0);
484		if (error)
485			goto fail;
486	}
487
488success:
489	/*
490	 * vm_flags and vm_page_prot are protected by the mmap_lock
491	 * held in write mode.
492	 */
493	vma->vm_flags = newflags;
494	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
495	vma_set_page_prot(vma);
496
497	change_protection(vma, start, end, vma->vm_page_prot,
498			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
499
500	/*
501	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
502	 * fault on access.
503	 */
504	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
505			(newflags & VM_WRITE)) {
506		populate_vma_page_range(vma, start, end, NULL);
507	}
508
509	vm_stat_account(mm, oldflags, -nrpages);
510	vm_stat_account(mm, newflags, nrpages);
511	perf_event_mmap(vma);
512	return 0;
513
514fail:
515	vm_unacct_memory(charged);
516	return error;
517}
518
519/*
520 * pkey==-1 when doing a legacy mprotect()
521 */
522static int do_mprotect_pkey(unsigned long start, size_t len,
523		unsigned long prot, int pkey)
524{
525	unsigned long nstart, end, tmp, reqprot;
526	struct vm_area_struct *vma, *prev;
527	int error = -EINVAL;
528	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
529	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
530				(prot & PROT_READ);
531
532	start = untagged_addr(start);
533
534	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
535	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
536		return -EINVAL;
537
538	if (start & ~PAGE_MASK)
539		return -EINVAL;
540	if (!len)
541		return 0;
542	len = PAGE_ALIGN(len);
543	end = start + len;
544	if (end <= start)
545		return -ENOMEM;
546	if (!arch_validate_prot(prot, start))
547		return -EINVAL;
548
549	reqprot = prot;
550
551	if (mmap_write_lock_killable(current->mm))
552		return -EINTR;
553
554	/*
555	 * If userspace did not allocate the pkey, do not let
556	 * them use it here.
557	 */
558	error = -EINVAL;
559	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
560		goto out;
561
562	vma = find_vma(current->mm, start);
563	error = -ENOMEM;
564	if (!vma)
565		goto out;
566	prev = vma->vm_prev;
567	if (unlikely(grows & PROT_GROWSDOWN)) {
568		if (vma->vm_start >= end)
569			goto out;
570		start = vma->vm_start;
571		error = -EINVAL;
572		if (!(vma->vm_flags & VM_GROWSDOWN))
573			goto out;
574	} else {
575		if (vma->vm_start > start)
576			goto out;
577		if (unlikely(grows & PROT_GROWSUP)) {
578			end = vma->vm_end;
579			error = -EINVAL;
580			if (!(vma->vm_flags & VM_GROWSUP))
581				goto out;
582		}
583	}
584	if (start > vma->vm_start)
585		prev = vma;
586
587	for (nstart = start ; ; ) {
588		unsigned long mask_off_old_flags;
589		unsigned long newflags;
590		int new_vma_pkey;
591
592		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
593
594		/* Does the application expect PROT_READ to imply PROT_EXEC */
595		if (rier && (vma->vm_flags & VM_MAYEXEC))
596			prot |= PROT_EXEC;
597
598		/*
599		 * Each mprotect() call explicitly passes r/w/x permissions.
600		 * If a permission is not passed to mprotect(), it must be
601		 * cleared from the VMA.
602		 */
603		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
604					VM_FLAGS_CLEAR;
605
606		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
607		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
608		newflags |= (vma->vm_flags & ~mask_off_old_flags);
609
610		/* newflags >> 4 shift VM_MAY% in place of VM_% */
611		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
612			error = -EACCES;
613			goto out;
614		}
615
616		/* Allow architectures to sanity-check the new flags */
617		if (!arch_validate_flags(newflags)) {
618			error = -EINVAL;
619			goto out;
620		}
621
622		error = security_file_mprotect(vma, reqprot, prot);
623		if (error)
624			goto out;
625
626		tmp = vma->vm_end;
627		if (tmp > end)
628			tmp = end;
629
630		if (vma->vm_ops && vma->vm_ops->mprotect) {
631			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
632			if (error)
633				goto out;
634		}
635
636		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
637		if (error)
638			goto out;
639
640		nstart = tmp;
641
642		if (nstart < prev->vm_end)
643			nstart = prev->vm_end;
644		if (nstart >= end)
645			goto out;
646
647		vma = prev->vm_next;
648		if (!vma || vma->vm_start != nstart) {
649			error = -ENOMEM;
650			goto out;
651		}
652		prot = reqprot;
653	}
654out:
655	mmap_write_unlock(current->mm);
656	return error;
657}
658
659SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
660		unsigned long, prot)
661{
662	return do_mprotect_pkey(start, len, prot, -1);
663}
664
665#ifdef CONFIG_ARCH_HAS_PKEYS
666
667SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
668		unsigned long, prot, int, pkey)
669{
670	return do_mprotect_pkey(start, len, prot, pkey);
671}
672
673SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
674{
675	int pkey;
676	int ret;
677
678	/* No flags supported yet. */
679	if (flags)
680		return -EINVAL;
681	/* check for unsupported init values */
682	if (init_val & ~PKEY_ACCESS_MASK)
683		return -EINVAL;
684
685	mmap_write_lock(current->mm);
686	pkey = mm_pkey_alloc(current->mm);
687
688	ret = -ENOSPC;
689	if (pkey == -1)
690		goto out;
691
692	ret = arch_set_user_pkey_access(current, pkey, init_val);
693	if (ret) {
694		mm_pkey_free(current->mm, pkey);
695		goto out;
696	}
697	ret = pkey;
698out:
699	mmap_write_unlock(current->mm);
700	return ret;
701}
702
703SYSCALL_DEFINE1(pkey_free, int, pkey)
704{
705	int ret;
706
707	mmap_write_lock(current->mm);
708	ret = mm_pkey_free(current->mm, pkey);
709	mmap_write_unlock(current->mm);
710
711	/*
712	 * We could provide warnings or errors if any VMA still
713	 * has the pkey set here.
714	 */
715	return ret;
716}
717
718#endif /* CONFIG_ARCH_HAS_PKEYS */