// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}
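
/*
 * Illustrative only, not part of the original file: a worked example of the
 * folding correction above, assuming a configuration where only the p4d
 * level is folded (PTRS_PER_P4D == 1, but pud and pmd are real):
 *
 *	real_depth(3) == 3	(a pmd-level entry is reported as pmd)
 *	real_depth(2) == 2	(a pud-level entry is reported as pud)
 *	real_depth(1) == 0	(p4d is folded, so it is really the pgd)
 */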

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}

#ifdef CONFIG_ARCH_HAS_HUGEPD
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	int err = 0;
	const struct mm_walk_ops *ops = walk->ops;
	int shift = hugepd_shift(*phpd);
	int page_size = 1 << shift;

	if (!ops->pte_entry)
		return 0;

	if (addr & (page_size - 1))
		return 0;

	for (;;) {
		pte_t *pte;

		spin_lock(&walk->mm->page_table_lock);
		pte = hugepte_offset(*phpd, addr, pdshift);
		err = ops->pte_entry(pte, addr, addr + page_size, walk);
		spin_unlock(&walk->mm->page_table_lock);

		if (err)
			break;
		if (addr >= end - page_size)
			break;
		addr += page_size;
	}
	return err;
}
#else
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	return 0;
}
#endif

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		if (is_hugepd(__hugepd(pmd_val(*pmd))))
			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
		else
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
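
/*
 * Illustrative only, not part of the original file: a minimal sketch of a
 * ->pmd_entry() handler that is aware of pmd_trans_huge() pmds, as the
 * comment in walk_pmd_range() requires. The handler name and the private
 * counter are hypothetical. It demonstrates the walk->action protocol:
 * setting ACTION_CONTINUE tells the walker the entry is fully handled, so
 * the huge pmd is neither split nor descended into.
 */
#if 0	/* example sketch */
static int thp_count_pmd_entry(pmd_t *pmd, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_thp = walk->private;	/* hypothetical counter */

	if (pmd_trans_huge(*pmd)) {
		(*nr_thp)++;
		/* Fully handled: don't split, don't visit the pte level. */
		walk->action = ACTION_CONTINUE;
	}
	/* Returning 0 with ACTION_SUBTREE descends to ->pte_entry(). */
	return 0;
}
#endif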

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (is_hugepd(__hugepd(pud_val(*pud))))
			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
		else
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(p4d_val(*p4d))))
			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(pgd_val(*pgd))))
			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, and we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * A vma with VM_PFNMAP doesn't have any valid struct pages behind its
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}
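
/*
 * Illustrative only, not part of the original file: a sketch of a
 * ->test_walk() callback following the contract documented above. The
 * function name and the skip policy are hypothetical; it returns 1 to
 * skip the current vma, 0 to walk it, and never an error.
 */
#if 0	/* example sketch */
static int skip_special_test_walk(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	/* Skip mappings without proper struct pages behind them. */
	if (walk->vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP | VM_IO))
		return 1;	/* skip this vma, continue with the next */
	return 0;		/* walk this vma */
}
#endif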

static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : succeeded in handling the current entry; if the end address has
 *         not been reached yet, continue to walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		    unsigned long end, const struct mm_walk_ops *ops,
		    void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = find_vma(mm, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
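
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * driving walk_page_range(). The ops structure, callback and counter are
 * hypothetical; the sketch assumes the pte_entry() contract documented
 * above and takes the mmap_lock the function asserts.
 */
#if 0	/* example sketch */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;	/* hypothetical counter */

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* keep walking */
}

static const struct mm_walk_ops count_present_ops = {
	.pte_entry	= count_present_pte,
};

static unsigned long count_present(struct mm_struct *mm, unsigned long start,
				   unsigned long end)
{
	unsigned long nr_present = 0;

	mmap_read_lock(mm);
	/* Errors are ignored for brevity in this sketch. */
	walk_page_range(mm, start, end, &count_present_ops, &nr_present);
	mmap_read_unlock(mm);
	return nr_present;
}
#endif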

/**
 * walk_page_range_novma - walk a range of pagetables not backed by a vma
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @pgd:	pgd to walk if different from mm->pgd
 * @private:	private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);
}
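
/*
 * Illustrative only, not part of the original file: a sketch of walking a
 * kernel virtual range with walk_page_range_novma(), in the spirit of the
 * kernel page table use mentioned above. The callback, ops and range are
 * hypothetical; note the mmap write lock the function asserts, and that
 * PTEs are visited unlocked on this path.
 */
#if 0	/* example sketch */
static int dump_kernel_pte(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	pr_info("pte at %#lx: %#lx\n", addr, (unsigned long)pte_val(*pte));
	return 0;
}

static const struct mm_walk_ops dump_ops = {
	.pte_entry	= dump_kernel_pte,
};

static void dump_kernel_range(unsigned long start, unsigned long end)
{
	mmap_write_lock(&init_mm);
	/* A NULL @pgd means the walk starts from init_mm's own pgd. */
	walk_page_range_novma(&init_mm, start, end, &dump_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);
}
#endif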

int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	mmap_assert_locked(walk.mm);
	return __walk_page_range(start, end, &walk);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping:	Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr:		Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
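
/*
 * Illustrative only, not part of the original file: a sketch of driving
 * walk_page_mapping() under the i_mmap_rwsem it asserts. The callback,
 * ops and wrapper are hypothetical; the point is the locking order
 * described in the kernel-doc above.
 */
#if 0	/* example sketch */
static int touch_mapping_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	/* Inspect or update one pte of one mapping of the file here. */
	return 0;
}

static const struct mm_walk_ops touch_ops = {
	.pte_entry	= touch_mapping_pte,
};

static int touch_file_range(struct address_space *mapping,
			    pgoff_t first_index, pgoff_t nr)
{
	int err;

	i_mmap_lock_read(mapping);
	err = walk_page_mapping(mapping, first_index, nr, &touch_ops, NULL);
	i_mmap_unlock_read(mapping);
	return err;
}
#endif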