v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  mm/mprotect.c
  4 *
  5 *  (C) Copyright 1994 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 *
  8 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  9 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 10 */
 11
 12#include <linux/mm.h>
 13#include <linux/hugetlb.h>
 14#include <linux/shm.h>
 15#include <linux/mman.h>
 16#include <linux/fs.h>
 17#include <linux/highmem.h>
 18#include <linux/security.h>
 19#include <linux/mempolicy.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/swap.h>
 23#include <linux/swapops.h>
 24#include <linux/mmu_notifier.h>
 25#include <linux/migrate.h>
 26#include <linux/perf_event.h>
 27#include <linux/pkeys.h>
 28#include <linux/ksm.h>
 29#include <linux/uaccess.h>
 30#include <linux/mm_inline.h>
 31#include <asm/pgtable.h>
 32#include <asm/cacheflush.h>
 33#include <asm/mmu_context.h>
 34#include <asm/tlbflush.h>
 35
 36#include "internal.h"
 37
 38static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		int dirty_accountable, int prot_numa)
 41{
 42	struct mm_struct *mm = vma->vm_mm;
 43	pte_t *pte, oldpte;
 44	spinlock_t *ptl;
 45	unsigned long pages = 0;
 46	int target_node = NUMA_NO_NODE;
 47
 48	/*
 49	 * Can be called with only the mmap_sem for reading by
 50	 * prot_numa so we must check the pmd isn't constantly
 51	 * changing from under us from pmd_none to pmd_trans_huge
 52	 * and/or the other way around.
 53	 */
 54	if (pmd_trans_unstable(pmd))
 55		return 0;
 56
 57	/*
 58	 * The pmd points to a regular pte so the pmd can't change
 59	 * from under us even if the mmap_sem is only held for
 60	 * reading.
 61	 */
 62	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 63
 64	/* Get target node for single threaded private VMAs */
 65	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
 66	    atomic_read(&vma->vm_mm->mm_users) == 1)
 67		target_node = numa_node_id();
 68
 69	flush_tlb_batched_pending(vma->vm_mm);
 70	arch_enter_lazy_mmu_mode();
 71	do {
 72		oldpte = *pte;
 73		if (pte_present(oldpte)) {
 74			pte_t ptent;
 75			bool preserve_write = prot_numa && pte_write(oldpte);
 76
 77			/*
 78			 * Avoid trapping faults against the zero or KSM
 79			 * pages. See similar comment in change_huge_pmd.
 80			 */
 81			if (prot_numa) {
 82				struct page *page;
 83
 84				page = vm_normal_page(vma, addr, oldpte);
 85				if (!page || PageKsm(page))
 86					continue;
 87
 88				/* Also skip shared copy-on-write pages */
 89				if (is_cow_mapping(vma->vm_flags) &&
 90				    page_mapcount(page) != 1)
 91					continue;
 92
 93				/*
 94				 * While migration can move some dirty pages,
 95				 * it cannot move them all from MIGRATE_ASYNC
 96				 * context.
 97				 */
 98				if (page_is_file_cache(page) && PageDirty(page))
 99					continue;
100
101				/* Avoid TLB flush if possible */
102				if (pte_protnone(oldpte))
103					continue;
104
105				/*
106				 * Don't mess with PTEs if page is already on the node
107				 * a single-threaded process is running on.
108				 */
109				if (target_node == page_to_nid(page))
110					continue;
111			}
112
113			ptent = ptep_modify_prot_start(mm, addr, pte);
114			ptent = pte_modify(ptent, newprot);
115			if (preserve_write)
116				ptent = pte_mk_savedwrite(ptent);
117
118			/* Avoid taking write faults for known dirty pages */
119			if (dirty_accountable && pte_dirty(ptent) &&
120					(pte_soft_dirty(ptent) ||
121					 !(vma->vm_flags & VM_SOFTDIRTY))) {
122				ptent = pte_mkwrite(ptent);
123			}
124			ptep_modify_prot_commit(mm, addr, pte, ptent);
125			pages++;
126		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
127			swp_entry_t entry = pte_to_swp_entry(oldpte);
128
129			if (is_write_migration_entry(entry)) {
130				pte_t newpte;
131				/*
132				 * A protection check is difficult so
133				 * just be safe and disable write
134				 */
135				make_migration_entry_read(&entry);
136				newpte = swp_entry_to_pte(entry);
137				if (pte_swp_soft_dirty(oldpte))
138					newpte = pte_swp_mksoft_dirty(newpte);
139				set_pte_at(mm, addr, pte, newpte);
140
141				pages++;
142			}
143
144			if (is_write_device_private_entry(entry)) {
145				pte_t newpte;
146
147				/*
148				 * We do not preserve soft-dirtiness. See
149				 * copy_one_pte() for explanation.
150				 */
151				make_device_private_entry_read(&entry);
152				newpte = swp_entry_to_pte(entry);
153				set_pte_at(mm, addr, pte, newpte);
154
155				pages++;
156			}
157		}
158	} while (pte++, addr += PAGE_SIZE, addr != end);
159	arch_leave_lazy_mmu_mode();
160	pte_unmap_unlock(pte - 1, ptl);
161
162	return pages;
163}
164
165static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
166		pud_t *pud, unsigned long addr, unsigned long end,
167		pgprot_t newprot, int dirty_accountable, int prot_numa)
168{
169	pmd_t *pmd;
170	struct mm_struct *mm = vma->vm_mm;
171	unsigned long next;
172	unsigned long pages = 0;
173	unsigned long nr_huge_updates = 0;
174	unsigned long mni_start = 0;
175
176	pmd = pmd_offset(pud, addr);
177	do {
178		unsigned long this_pages;
179
180		next = pmd_addr_end(addr, end);
181		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
182				&& pmd_none_or_clear_bad(pmd))
183			goto next;
184
185		/* invoke the mmu notifier if the pmd is populated */
186		if (!mni_start) {
187			mni_start = addr;
188			mmu_notifier_invalidate_range_start(mm, mni_start, end);
189		}
190
191		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
192			if (next - addr != HPAGE_PMD_SIZE) {
193				__split_huge_pmd(vma, pmd, addr, false, NULL);
194			} else {
195				int nr_ptes = change_huge_pmd(vma, pmd, addr,
196						newprot, prot_numa);
197
198				if (nr_ptes) {
199					if (nr_ptes == HPAGE_PMD_NR) {
200						pages += HPAGE_PMD_NR;
201						nr_huge_updates++;
202					}
203
204					/* huge pmd was handled */
205					goto next;
206				}
207			}
208			/* fall through, the trans huge pmd just split */
209		}
210		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
211				 dirty_accountable, prot_numa);
212		pages += this_pages;
213next:
214		cond_resched();
215	} while (pmd++, addr = next, addr != end);
216
217	if (mni_start)
218		mmu_notifier_invalidate_range_end(mm, mni_start, end);
219
220	if (nr_huge_updates)
221		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
222	return pages;
223}
224
225static inline unsigned long change_pud_range(struct vm_area_struct *vma,
226		p4d_t *p4d, unsigned long addr, unsigned long end,
227		pgprot_t newprot, int dirty_accountable, int prot_numa)
228{
229	pud_t *pud;
230	unsigned long next;
231	unsigned long pages = 0;
232
233	pud = pud_offset(p4d, addr);
234	do {
235		next = pud_addr_end(addr, end);
236		if (pud_none_or_clear_bad(pud))
237			continue;
238		pages += change_pmd_range(vma, pud, addr, next, newprot,
239				 dirty_accountable, prot_numa);
240	} while (pud++, addr = next, addr != end);
241
242	return pages;
243}
244
245static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
246		pgd_t *pgd, unsigned long addr, unsigned long end,
247		pgprot_t newprot, int dirty_accountable, int prot_numa)
248{
249	p4d_t *p4d;
250	unsigned long next;
251	unsigned long pages = 0;
252
253	p4d = p4d_offset(pgd, addr);
254	do {
255		next = p4d_addr_end(addr, end);
256		if (p4d_none_or_clear_bad(p4d))
257			continue;
258		pages += change_pud_range(vma, p4d, addr, next, newprot,
259				 dirty_accountable, prot_numa);
260	} while (p4d++, addr = next, addr != end);
261
262	return pages;
263}
264
265static unsigned long change_protection_range(struct vm_area_struct *vma,
266		unsigned long addr, unsigned long end, pgprot_t newprot,
267		int dirty_accountable, int prot_numa)
268{
269	struct mm_struct *mm = vma->vm_mm;
270	pgd_t *pgd;
271	unsigned long next;
272	unsigned long start = addr;
273	unsigned long pages = 0;
274
275	BUG_ON(addr >= end);
276	pgd = pgd_offset(mm, addr);
277	flush_cache_range(vma, addr, end);
278	inc_tlb_flush_pending(mm);
279	do {
280		next = pgd_addr_end(addr, end);
281		if (pgd_none_or_clear_bad(pgd))
282			continue;
283		pages += change_p4d_range(vma, pgd, addr, next, newprot,
284				 dirty_accountable, prot_numa);
285	} while (pgd++, addr = next, addr != end);
286
287	/* Only flush the TLB if we actually modified any entries: */
288	if (pages)
289		flush_tlb_range(vma, start, end);
290	dec_tlb_flush_pending(mm);
291
292	return pages;
293}
294
295unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
296		       unsigned long end, pgprot_t newprot,
297		       int dirty_accountable, int prot_numa)
298{
299	unsigned long pages;
300
301	if (is_vm_hugetlb_page(vma))
302		pages = hugetlb_change_protection(vma, start, end, newprot);
303	else
304		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
305
306	return pages;
307}
308
309int
310mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
311	unsigned long start, unsigned long end, unsigned long newflags)
312{
313	struct mm_struct *mm = vma->vm_mm;
314	unsigned long oldflags = vma->vm_flags;
315	long nrpages = (end - start) >> PAGE_SHIFT;
316	unsigned long charged = 0;
317	pgoff_t pgoff;
318	int error;
319	int dirty_accountable = 0;
320
321	if (newflags == oldflags) {
322		*pprev = vma;
323		return 0;
324	}
325
326	/*
327	 * If we make a private mapping writable we increase our commit;
328	 * but (without finer accounting) cannot reduce our commit if we
 329	 * make it unwritable again. hugetlb mappings were accounted for
 330	 * even if read-only, so there is no need to account for them here
331	 */
332	if (newflags & VM_WRITE) {
333		/* Check space limits when area turns into data. */
334		if (!may_expand_vm(mm, newflags, nrpages) &&
335				may_expand_vm(mm, oldflags, nrpages))
336			return -ENOMEM;
337		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
338						VM_SHARED|VM_NORESERVE))) {
339			charged = nrpages;
340			if (security_vm_enough_memory_mm(mm, charged))
341				return -ENOMEM;
342			newflags |= VM_ACCOUNT;
343		}
344	}
345
346	/*
347	 * First try to merge with previous and/or next vma.
348	 */
349	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
350	*pprev = vma_merge(mm, *pprev, start, end, newflags,
351			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
352			   vma->vm_userfaultfd_ctx);
353	if (*pprev) {
354		vma = *pprev;
355		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
356		goto success;
357	}
358
359	*pprev = vma;
360
361	if (start != vma->vm_start) {
362		error = split_vma(mm, vma, start, 1);
363		if (error)
364			goto fail;
365	}
366
367	if (end != vma->vm_end) {
368		error = split_vma(mm, vma, end, 0);
369		if (error)
370			goto fail;
371	}
372
373success:
374	/*
375	 * vm_flags and vm_page_prot are protected by the mmap_sem
376	 * held in write mode.
377	 */
378	vma->vm_flags = newflags;
379	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
380	vma_set_page_prot(vma);
381
382	change_protection(vma, start, end, vma->vm_page_prot,
383			  dirty_accountable, 0);
384
385	/*
386	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
387	 * fault on access.
388	 */
389	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
390			(newflags & VM_WRITE)) {
391		populate_vma_page_range(vma, start, end, NULL);
392	}
393
394	vm_stat_account(mm, oldflags, -nrpages);
395	vm_stat_account(mm, newflags, nrpages);
396	perf_event_mmap(vma);
397	return 0;
398
399fail:
400	vm_unacct_memory(charged);
401	return error;
402}
403
404/*
405 * pkey==-1 when doing a legacy mprotect()
406 */
407static int do_mprotect_pkey(unsigned long start, size_t len,
408		unsigned long prot, int pkey)
409{
410	unsigned long nstart, end, tmp, reqprot;
411	struct vm_area_struct *vma, *prev;
412	int error = -EINVAL;
413	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
414	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
415				(prot & PROT_READ);
416
417	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
418	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
419		return -EINVAL;
420
421	if (start & ~PAGE_MASK)
422		return -EINVAL;
423	if (!len)
424		return 0;
425	len = PAGE_ALIGN(len);
426	end = start + len;
427	if (end <= start)
428		return -ENOMEM;
429	if (!arch_validate_prot(prot, start))
430		return -EINVAL;
431
432	reqprot = prot;
433
434	if (down_write_killable(&current->mm->mmap_sem))
435		return -EINTR;
436
437	/*
438	 * If userspace did not allocate the pkey, do not let
439	 * them use it here.
440	 */
441	error = -EINVAL;
442	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
443		goto out;
444
445	vma = find_vma(current->mm, start);
446	error = -ENOMEM;
447	if (!vma)
448		goto out;
449	prev = vma->vm_prev;
450	if (unlikely(grows & PROT_GROWSDOWN)) {
451		if (vma->vm_start >= end)
452			goto out;
453		start = vma->vm_start;
454		error = -EINVAL;
455		if (!(vma->vm_flags & VM_GROWSDOWN))
456			goto out;
457	} else {
458		if (vma->vm_start > start)
459			goto out;
460		if (unlikely(grows & PROT_GROWSUP)) {
461			end = vma->vm_end;
462			error = -EINVAL;
463			if (!(vma->vm_flags & VM_GROWSUP))
464				goto out;
465		}
466	}
467	if (start > vma->vm_start)
468		prev = vma;
469
470	for (nstart = start ; ; ) {
471		unsigned long mask_off_old_flags;
472		unsigned long newflags;
473		int new_vma_pkey;
474
475		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
476
477		/* Does the application expect PROT_READ to imply PROT_EXEC */
478		if (rier && (vma->vm_flags & VM_MAYEXEC))
479			prot |= PROT_EXEC;
480
481		/*
482		 * Each mprotect() call explicitly passes r/w/x permissions.
483		 * If a permission is not passed to mprotect(), it must be
484		 * cleared from the VMA.
485		 */
486		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
487					VM_FLAGS_CLEAR;
488
489		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
490		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
491		newflags |= (vma->vm_flags & ~mask_off_old_flags);
492
 493		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
494		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
495			error = -EACCES;
496			goto out;
497		}
498
499		error = security_file_mprotect(vma, reqprot, prot);
500		if (error)
501			goto out;
502
503		tmp = vma->vm_end;
504		if (tmp > end)
505			tmp = end;
506		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
507		if (error)
508			goto out;
509		nstart = tmp;
510
511		if (nstart < prev->vm_end)
512			nstart = prev->vm_end;
513		if (nstart >= end)
514			goto out;
515
516		vma = prev->vm_next;
517		if (!vma || vma->vm_start != nstart) {
518			error = -ENOMEM;
519			goto out;
520		}
521		prot = reqprot;
522	}
523out:
524	up_write(&current->mm->mmap_sem);
525	return error;
526}
527
528SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
529		unsigned long, prot)
530{
531	return do_mprotect_pkey(start, len, prot, -1);
532}
533
534#ifdef CONFIG_ARCH_HAS_PKEYS
535
536SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
537		unsigned long, prot, int, pkey)
538{
539	return do_mprotect_pkey(start, len, prot, pkey);
540}
541
542SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
543{
544	int pkey;
545	int ret;
546
547	/* No flags supported yet. */
548	if (flags)
549		return -EINVAL;
550	/* check for unsupported init values */
551	if (init_val & ~PKEY_ACCESS_MASK)
552		return -EINVAL;
553
554	down_write(&current->mm->mmap_sem);
555	pkey = mm_pkey_alloc(current->mm);
556
557	ret = -ENOSPC;
558	if (pkey == -1)
559		goto out;
560
561	ret = arch_set_user_pkey_access(current, pkey, init_val);
562	if (ret) {
563		mm_pkey_free(current->mm, pkey);
564		goto out;
565	}
566	ret = pkey;
567out:
568	up_write(&current->mm->mmap_sem);
569	return ret;
570}
571
572SYSCALL_DEFINE1(pkey_free, int, pkey)
573{
574	int ret;
575
576	down_write(&current->mm->mmap_sem);
577	ret = mm_pkey_free(current->mm, pkey);
578	up_write(&current->mm->mmap_sem);
579
580	/*
 581	 * We could provide warnings or errors if any VMA still
582	 * has the pkey set here.
583	 */
584	return ret;
585}
586
587#endif /* CONFIG_ARCH_HAS_PKEYS */
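
The listing above implements the mprotect(2), pkey_mprotect(2), pkey_alloc(2) and pkey_free(2) entry points. For orientation, here is a minimal userspace sketch of how those syscalls are typically exercised; the page size, buffer contents and error handling are illustrative only, and the pkey_* calls need kernel (CONFIG_ARCH_HAS_PKEYS) and glibc (2.27+) support for memory protection keys.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;	/* assume one 4 KiB page, for illustration */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(buf, "hello");

	/* Drop write permission; a later store to buf would raise SIGSEGV. */
	if (mprotect(buf, len, PROT_READ) == -1) {
		perror("mprotect");
		return 1;
	}

	/*
	 * Protection-keys variant: tag the pages with a key whose access
	 * rights can later be toggled from userspace without another
	 * mprotect() call. Skipped gracefully if pkeys are unavailable.
	 */
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey >= 0) {
		if (pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey) == -1)
			perror("pkey_mprotect");
		pkey_free(pkey);
	}

	printf("%s\n", buf);
	munmap(buf, len);
	return 0;
}
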
v3.1
 
  1/*
  2 *  mm/mprotect.c
  3 *
  4 *  (C) Copyright 1994 Linus Torvalds
  5 *  (C) Copyright 2002 Christoph Hellwig
  6 *
  7 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  8 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/hugetlb.h>
 13#include <linux/shm.h>
 14#include <linux/mman.h>
 15#include <linux/fs.h>
 16#include <linux/highmem.h>
 17#include <linux/security.h>
 18#include <linux/mempolicy.h>
 19#include <linux/personality.h>
 20#include <linux/syscalls.h>
 21#include <linux/swap.h>
 22#include <linux/swapops.h>
 23#include <linux/mmu_notifier.h>
 24#include <linux/migrate.h>
 25#include <linux/perf_event.h>
 26#include <asm/uaccess.h>
 27#include <asm/pgtable.h>
 28#include <asm/cacheflush.h>
 29#include <asm/tlbflush.h>
 30
 31#ifndef pgprot_modify
 32static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 33{
 34	return newprot;
 35}
 36#endif
 37
 38static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		int dirty_accountable)
 41{
 42	pte_t *pte, oldpte;
 43	spinlock_t *ptl;
 44
 45	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 46	arch_enter_lazy_mmu_mode();
 47	do {
 48		oldpte = *pte;
 49		if (pte_present(oldpte)) {
 50			pte_t ptent;
 51
 52			ptent = ptep_modify_prot_start(mm, addr, pte);
 53			ptent = pte_modify(ptent, newprot);
 54
 55			/*
 56			 * Avoid taking write faults for pages we know to be
 57			 * dirty.
 58			 */
 59			if (dirty_accountable && pte_dirty(ptent))
 60				ptent = pte_mkwrite(ptent);
 61
 62			ptep_modify_prot_commit(mm, addr, pte, ptent);
 63		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
 64			swp_entry_t entry = pte_to_swp_entry(oldpte);
 65
 66			if (is_write_migration_entry(entry)) {
 67				/*
 68				 * A protection check is difficult so
 69				 * just be safe and disable write
 70				 */
 71				make_migration_entry_read(&entry);
 72				set_pte_at(mm, addr, pte,
 73					swp_entry_to_pte(entry));
 74			}
 75		}
 76	} while (pte++, addr += PAGE_SIZE, addr != end);
 77	arch_leave_lazy_mmu_mode();
 78	pte_unmap_unlock(pte - 1, ptl);
 79}
 80
 81static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 82		unsigned long addr, unsigned long end, pgprot_t newprot,
 83		int dirty_accountable)
 84{
 85	pmd_t *pmd;
 86	unsigned long next;
 87
 88	pmd = pmd_offset(pud, addr);
 89	do {
 90		next = pmd_addr_end(addr, end);
 91		if (pmd_trans_huge(*pmd)) {
 92			if (next - addr != HPAGE_PMD_SIZE)
 93				split_huge_page_pmd(vma->vm_mm, pmd);
 94			else if (change_huge_pmd(vma, pmd, addr, newprot))
 95				continue;
 96			/* fall through */
 97		}
 98		if (pmd_none_or_clear_bad(pmd))
 99			continue;
100		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
101				 dirty_accountable);
102	} while (pmd++, addr = next, addr != end);
103}
104
105static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
106		unsigned long addr, unsigned long end, pgprot_t newprot,
107		int dirty_accountable)
108{
109	pud_t *pud;
110	unsigned long next;
111
112	pud = pud_offset(pgd, addr);
113	do {
114		next = pud_addr_end(addr, end);
115		if (pud_none_or_clear_bad(pud))
116			continue;
117		change_pmd_range(vma, pud, addr, next, newprot,
118				 dirty_accountable);
119	} while (pud++, addr = next, addr != end);
120}
121
122static void change_protection(struct vm_area_struct *vma,
123		unsigned long addr, unsigned long end, pgprot_t newprot,
124		int dirty_accountable)
125{
126	struct mm_struct *mm = vma->vm_mm;
127	pgd_t *pgd;
128	unsigned long next;
129	unsigned long start = addr;
130
131	BUG_ON(addr >= end);
132	pgd = pgd_offset(mm, addr);
133	flush_cache_range(vma, addr, end);
134	do {
135		next = pgd_addr_end(addr, end);
136		if (pgd_none_or_clear_bad(pgd))
137			continue;
138		change_pud_range(vma, pgd, addr, next, newprot,
139				 dirty_accountable);
140	} while (pgd++, addr = next, addr != end);
141	flush_tlb_range(vma, start, end);
142}
143
144int
145mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
146	unsigned long start, unsigned long end, unsigned long newflags)
147{
148	struct mm_struct *mm = vma->vm_mm;
149	unsigned long oldflags = vma->vm_flags;
150	long nrpages = (end - start) >> PAGE_SHIFT;
151	unsigned long charged = 0;
152	pgoff_t pgoff;
153	int error;
154	int dirty_accountable = 0;
155
156	if (newflags == oldflags) {
157		*pprev = vma;
158		return 0;
159	}
160
161	/*
162	 * If we make a private mapping writable we increase our commit;
163	 * but (without finer accounting) cannot reduce our commit if we
164	 * make it unwritable again. hugetlb mappings were accounted for
165	 * even if read-only, so there is no need to account for them here
166	 */
167	if (newflags & VM_WRITE) {
168		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
169						VM_SHARED|VM_NORESERVE))) {
170			charged = nrpages;
171			if (security_vm_enough_memory(charged))
172				return -ENOMEM;
173			newflags |= VM_ACCOUNT;
174		}
175	}
176
177	/*
178	 * First try to merge with previous and/or next vma.
179	 */
180	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
181	*pprev = vma_merge(mm, *pprev, start, end, newflags,
182			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
183	if (*pprev) {
184		vma = *pprev;
185		goto success;
186	}
187
188	*pprev = vma;
189
190	if (start != vma->vm_start) {
191		error = split_vma(mm, vma, start, 1);
192		if (error)
193			goto fail;
194	}
195
196	if (end != vma->vm_end) {
197		error = split_vma(mm, vma, end, 0);
198		if (error)
199			goto fail;
200	}
201
202success:
203	/*
204	 * vm_flags and vm_page_prot are protected by the mmap_sem
205	 * held in write mode.
206	 */
207	vma->vm_flags = newflags;
208	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
209					  vm_get_page_prot(newflags));
210
211	if (vma_wants_writenotify(vma)) {
212		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
213		dirty_accountable = 1;
214	}
215
216	mmu_notifier_invalidate_range_start(mm, start, end);
217	if (is_vm_hugetlb_page(vma))
218		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
219	else
220		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
221	mmu_notifier_invalidate_range_end(mm, start, end);
222	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
223	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
224	perf_event_mmap(vma);
225	return 0;
226
227fail:
228	vm_unacct_memory(charged);
229	return error;
230}
231
232SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
233		unsigned long, prot)
234{
235	unsigned long vm_flags, nstart, end, tmp, reqprot;
236	struct vm_area_struct *vma, *prev;
237	int error = -EINVAL;
238	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
239	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
240	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
241		return -EINVAL;
242
243	if (start & ~PAGE_MASK)
244		return -EINVAL;
245	if (!len)
246		return 0;
247	len = PAGE_ALIGN(len);
248	end = start + len;
249	if (end <= start)
250		return -ENOMEM;
251	if (!arch_validate_prot(prot))
252		return -EINVAL;
253
254	reqprot = prot;
255	/*
256	 * Does the application expect PROT_READ to imply PROT_EXEC:
257	 */
258	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
259		prot |= PROT_EXEC;
260
261	vm_flags = calc_vm_prot_bits(prot);
262
263	down_write(&current->mm->mmap_sem);
264
265	vma = find_vma_prev(current->mm, start, &prev);
266	error = -ENOMEM;
267	if (!vma)
268		goto out;
269	if (unlikely(grows & PROT_GROWSDOWN)) {
270		if (vma->vm_start >= end)
271			goto out;
272		start = vma->vm_start;
273		error = -EINVAL;
274		if (!(vma->vm_flags & VM_GROWSDOWN))
275			goto out;
276	}
277	else {
278		if (vma->vm_start > start)
279			goto out;
280		if (unlikely(grows & PROT_GROWSUP)) {
281			end = vma->vm_end;
282			error = -EINVAL;
283			if (!(vma->vm_flags & VM_GROWSUP))
284				goto out;
285		}
286	}
287	if (start > vma->vm_start)
288		prev = vma;
289
290	for (nstart = start ; ; ) {
291		unsigned long newflags;
292
293		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
294
295		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
296
297		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
298		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
299			error = -EACCES;
300			goto out;
301		}
302
303		error = security_file_mprotect(vma, reqprot, prot);
304		if (error)
305			goto out;
306
307		tmp = vma->vm_end;
308		if (tmp > end)
309			tmp = end;
310		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
311		if (error)
312			goto out;
313		nstart = tmp;
314
315		if (nstart < prev->vm_end)
316			nstart = prev->vm_end;
317		if (nstart >= end)
318			goto out;
319
320		vma = prev->vm_next;
321		if (!vma || vma->vm_start != nstart) {
322			error = -ENOMEM;
323			goto out;
324		}
325	}
326out:
327	up_write(&current->mm->mmap_sem);
328	return error;
329}
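
In both versions, the mprotect() path refuses with -EACCES any request for a permission the VMA is never allowed to gain, via the expression (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC): shifting the flags right by 4 moves each VM_MAY* bit onto its VM_* counterpart. The standalone sketch below redefines the relevant vm_flags bits with their usual values purely for illustration, to show the check in isolation.

#include <stdio.h>

/* Illustrative copies of the vm_flags bits involved in the check. */
#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYREAD	0x00000010UL
#define VM_MAYWRITE	0x00000020UL
#define VM_MAYEXEC	0x00000040UL

/* Nonzero if newflags asks for a permission not backed by a VM_MAY* bit. */
static unsigned long forbidden(unsigned long newflags)
{
	/* newflags >> 4 shifts VM_MAY* into the VM_* positions. */
	return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
}

int main(void)
{
	/* PROT_WRITE requested on a VMA that may only ever be read. */
	unsigned long ro_vma = VM_READ | VM_WRITE | VM_MAYREAD;
	/* Same request on a VMA that may also become writable. */
	unsigned long rw_vma = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;

	printf("read-only VMA: %s\n", forbidden(ro_vma) ? "EACCES" : "ok");
	printf("writable VMA:  %s\n", forbidden(rw_vma) ? "EACCES" : "ok");
	return 0;
}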