/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006 Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}
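
/*
 * Worked example of the hugepage-border test above (illustrative,
 * assuming 2 MB huge pages): huge_page_mask(h) is ~0x1fffffUL, so
 * "addr & ~huge_page_mask(h)" is the byte offset of addr within its
 * huge page. That offset is zero exactly at a hugepage boundary,
 * which is when the outer loop must look up a fresh huge PTE.
 */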

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_page(mapping, pgoff);
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may return swap: account for swapcache page too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}
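
/*
 * An aside on the swap lookup above: this era of the kernel has a
 * single global swap address space, swapper_space, and the raw
 * swp_entry_t value doubles as the page offset into it, so
 * mincore_page(&swapper_space, entry.val) simply asks whether the
 * swapped-out page is still present in the swap cache.
 */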

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
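/*
 * Illustrative user-space usage (a sketch, not part of this file; it
 * assumes the libc mincore() wrapper, an anonymous mapping, and the
 * page size reported by sysconf(_SC_PAGESIZE)):
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 8 * page;
 *		unsigned char vec[8];		// one byte per page queried
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		int i;
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		buf[0] = 1;			// fault in the first page only
 *		if (mincore(buf, len, vec) == 0) {
 *			for (i = 0; i < 8; i++)
 *				printf("page %d: %sresident\n", i,
 *				       (vec[i] & 1) ? "" : "not ");
 *		}
 *		munmap(buf, len);
 *		return 0;
 *	}
 *
 * Only the least significant bit of each vec byte is defined; callers
 * should mask with 1 as above.
 */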
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;
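	/*
	 * Worked example of the rounding above, assuming 4 KB pages:
	 * len = 20481 bytes is 5 pages + 1 byte, so len >> PAGE_SHIFT
	 * gives 5 and the partial-page test adds 1, for 6 vec entries,
	 * one per page the range touches.
	 */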

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
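		/* e.g. with 4 KB pages: 4096 entries, 16 MB of address space per pass */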
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006 Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (xa_is_value(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			struct swap_info_struct *si;

			/* Prevent the swap device from being swapped off under us */
			si = get_swap_device(swp);
			if (si) {
				page = find_get_page(swap_address_space(swp),
						     swp_offset(swp));
				put_swap_device(si);
			} else
				page = NULL;
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}
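
/*
 * An illustrative note on the side channel mentioned above: without
 * this check, an unprivileged process could map a shared file it can
 * only read (a shared library, say) and use the residency vector to
 * observe when other processes touch particular pages of it. For
 * mappings that fail the test, do_mincore() below reports every page
 * as resident rather than walking the page tables.
 */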

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
};
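
/*
 * How the walk dispatches (an explanatory aside): walk_page_range()
 * calls .pmd_entry for each populated pmd in the range, .pte_hole for
 * spans with nothing mapped at some page-table level, and
 * .hugetlb_entry for hugetlb VMAs. Each callback advances
 * walk->private past the vec entries it filled, so between them the
 * callbacks account for every page of the range exactly once.
 */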

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);

		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}