v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;
	struct mm_walk mincore_walk = {
		.pmd_entry = mincore_pte_range,
		.pte_hole = mincore_unmapped_range,
		.hugetlb_entry = mincore_hugetlb,
		.private = vec,
	};

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	mincore_walk.mm = vma->vm_mm;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	err = walk_page_range(addr, end, &mincore_walk);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
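
The comment block above SYSCALL_DEFINE3 documents the user-visible contract of mincore(2): the start address must be page-aligned, and each byte of vec reports residency of one page in its least significant bit. As a rough illustration only, and not part of mincore.c, the userspace sketch below maps a file read-only and counts how many of its pages are currently resident. The default path, the glibc mincore() wrapper from <sys/mman.h>, and the error handling are assumptions made for this example.

/*
 * Illustrative userspace sketch, not kernel code: query page residency
 * of a memory-mapped file with mincore(2).
 */
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/etc/hostname"; /* hypothetical default */
	struct stat st;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
		perror("open/fstat");
		return 1;
	}

	size_t len = st.st_size;
	long page = sysconf(_SC_PAGESIZE);
	size_t pages = (len + page - 1) / page;

	/* mmap() returns a page-aligned address, as mincore() requires */
	void *addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	unsigned char *vec = calloc(pages, 1);
	if (!vec)
		return 1;
	if (mincore(addr, len, vec) < 0) {
		perror("mincore");
		return 1;
	}

	size_t resident = 0;
	for (size_t i = 0; i < pages; i++)
		resident += vec[i] & 1;	/* only the low bit is defined */

	printf("%zu of %zu pages of %s resident\n", resident, pages, path);

	free(vec);
	munmap(addr, len);
	close(fd);
	return 0;
}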