/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

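/* Pick the RSS counter that tracks this page: anonymous or file-backed. */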
static int mm_counter(struct page *page)
{
	return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
}

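/*
 * Tear down whatever currently occupies the pte: a present page, a
 * swap or migration entry, or a file pte.  Called with the page table
 * lock held.
 */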
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page;
	swp_entry_t entry;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, mm_counter(page));
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {	/* zap_pte() is not called when pte_none() */
		if (!pte_file(pte)) {
			update_hiwater_rss(mm);
			entry = pte_to_swp_entry(pte);
			if (non_swap_entry(entry)) {
				if (is_migration_entry(entry)) {
					page = migration_entry_to_page(entry);
					dec_mm_counter(mm, mm_counter(page));
				}
			} else {
				free_swap_and_cache(entry);
				dec_mm_counter(mm, MM_SWAPENTS);
			}
		}
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at the given virtual memory address, releasing
 * any previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte, ptfile;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte)) {
		if (pte_present(*pte) && pte_soft_dirty(*pte))
			ptfile = pte_file_mksoft_dirty(ptfile);
		zap_pte(mm, vma, addr, pte);
	}

	set_pte_at(mm, addr, pte, ptfile);
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

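/*
 * Default ->remap_pages implementation: walk the range one page at a
 * time and install a file pte for each page, recording the file offset
 * that should be faulted in there later.
 */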
int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * We need down_write() to change vma->vm_flags, so start with
	 * down_read() and upgrade to the write lock only when required.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

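		/*
		 * mmap_sem cannot be upgraded in place: drop the read
		 * lock, take the write lock, then revalidate everything
		 * from scratch via the retry label above.
		 */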
		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);
			/* mmap_region may free vma; grab the info now */
			vm_flags = vma->vm_flags;

			addr = mmap_region(file, start, size, vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out_freed;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_Mlocked flag for over-mapped range
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->remap_pages completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
out_freed:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}
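
For reference, here is a minimal userspace sketch of how this syscall is
typically driven through the glibc wrapper. The file name, size, and page
choices below are hypothetical illustrations, not taken from this source;
error handling is trimmed for brevity.

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDWR);	/* hypothetical file, >= 2 pages */
	char *p;

	if (fd < 0)
		return 1;

	/* Map the first two pages of the file linearly. */
	p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * Rewire the first virtual page to show file page 1 instead of
	 * file page 0, without creating a new vma.  prot must be 0 (see
	 * the NOTE in the kernel-doc above); pgoff counts pages, not bytes.
	 */
	if (remap_file_pages(p, psz, 0, 1, 0) != 0) {
		perror("remap_file_pages");
		return 1;
	}

	printf("p[0] now reads from file offset %ld\n", psz);
	munmap(p, 2 * psz);
	close(fd);
	return 0;
}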