mm/pagewalk.c at v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/highmem.h>
  4#include <linux/sched.h>
  5#include <linux/hugetlb.h>
  6
  7static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  8			  struct mm_walk *walk)
  9{
 10	pte_t *pte;
 11	int err = 0;
 12
 13	pte = pte_offset_map(pmd, addr);
 14	for (;;) {
 15		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
 16		if (err)
 17		       break;
 18		addr += PAGE_SIZE;
 19		if (addr == end)
 20			break;
 21		pte++;
 22	}
 23
 24	pte_unmap(pte);
 25	return err;
 26}
 27
 28static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 29			  struct mm_walk *walk)
 30{
 31	pmd_t *pmd;
 32	unsigned long next;
 33	int err = 0;
 34
 35	pmd = pmd_offset(pud, addr);
 36	do {
 37again:
 38		next = pmd_addr_end(addr, end);
 39		if (pmd_none(*pmd) || !walk->vma) {
 40			if (walk->pte_hole)
 41				err = walk->pte_hole(addr, next, walk);
 42			if (err)
 43				break;
 44			continue;
 45		}
 46		/*
 47		 * This implies that each ->pmd_entry() handler
 48		 * needs to know about pmd_trans_huge() pmds
 49		 */
 50		if (walk->pmd_entry)
 51			err = walk->pmd_entry(pmd, addr, next, walk);
 52		if (err)
 53			break;
 54
 55		/*
 56		 * Check this here so we only break down trans_huge
 57		 * pages when we _need_ to
 58		 */
 59		if (!walk->pte_entry)
 60			continue;
 61
 62		split_huge_pmd(walk->vma, pmd, addr);
 63		if (pmd_trans_unstable(pmd))
 64			goto again;
 65		err = walk_pte_range(pmd, addr, next, walk);
 66		if (err)
 67			break;
 68	} while (pmd++, addr = next, addr != end);
 69
 70	return err;
 71}
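
The comment above points out that, with this v4.17 interface, every ->pmd_entry() handler has to recognise transparent huge pmds itself: walk_pmd_range() invokes it before any splitting, and only splits when a ->pte_entry() is also present. Below is a minimal sketch of such a handler fragment; the function name, the THP-counting purpose and the use of pmd_trans_huge_lock() as the locking idiom are illustrative assumptions, while the callback signature follows the call site above.

static int count_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_thp = walk->private;	/* caller-supplied counter */
	spinlock_t *ptl;

	/* Returns the held pmd lock only for a huge entry, NULL otherwise. */
	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		(*nr_thp)++;
		spin_unlock(ptl);
	}
	return 0;	/* 0: keep walking; the walker handles the rest */
}
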
 72
 73static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 74			  struct mm_walk *walk)
 75{
 76	pud_t *pud;
 77	unsigned long next;
 78	int err = 0;
 79
 80	pud = pud_offset(p4d, addr);
 81	do {
 82 again:
 83		next = pud_addr_end(addr, end);
 84		if (pud_none(*pud) || !walk->vma) {
 85			if (walk->pte_hole)
 86				err = walk->pte_hole(addr, next, walk);
 87			if (err)
 88				break;
 89			continue;
 90		}
 91
 92		if (walk->pud_entry) {
 93			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
 94
 95			if (ptl) {
 96				err = walk->pud_entry(pud, addr, next, walk);
 97				spin_unlock(ptl);
 98				if (err)
 99					break;
100				continue;
101			}
102		}
103
104		split_huge_pud(walk->vma, pud, addr);
105		if (pud_none(*pud))
106			goto again;
107
108		if (walk->pmd_entry || walk->pte_entry)
109			err = walk_pmd_range(pud, addr, next, walk);
110		if (err)
111			break;
112	} while (pud++, addr = next, addr != end);
113
114	return err;
115}
116
117static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
118			  struct mm_walk *walk)
119{
120	p4d_t *p4d;
121	unsigned long next;
122	int err = 0;
123
124	p4d = p4d_offset(pgd, addr);
125	do {
126		next = p4d_addr_end(addr, end);
127		if (p4d_none_or_clear_bad(p4d)) {
128			if (walk->pte_hole)
129				err = walk->pte_hole(addr, next, walk);
130			if (err)
131				break;
132			continue;
133		}
134		if (walk->pmd_entry || walk->pte_entry)
135			err = walk_pud_range(p4d, addr, next, walk);
136		if (err)
137			break;
138	} while (p4d++, addr = next, addr != end);
139
140	return err;
141}
142
143static int walk_pgd_range(unsigned long addr, unsigned long end,
144			  struct mm_walk *walk)
145{
146	pgd_t *pgd;
147	unsigned long next;
148	int err = 0;
149
150	pgd = pgd_offset(walk->mm, addr);
151	do {
152		next = pgd_addr_end(addr, end);
153		if (pgd_none_or_clear_bad(pgd)) {
154			if (walk->pte_hole)
155				err = walk->pte_hole(addr, next, walk);
156			if (err)
157				break;
158			continue;
159		}
160		if (walk->pmd_entry || walk->pte_entry)
161			err = walk_p4d_range(pgd, addr, next, walk);
162		if (err)
163			break;
164	} while (pgd++, addr = next, addr != end);
165
166	return err;
167}
168
169#ifdef CONFIG_HUGETLB_PAGE
170static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
171				       unsigned long end)
172{
173	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
174	return boundary < end ? boundary : end;
175}
176
177static int walk_hugetlb_range(unsigned long addr, unsigned long end,
178			      struct mm_walk *walk)
179{
180	struct vm_area_struct *vma = walk->vma;
181	struct hstate *h = hstate_vma(vma);
182	unsigned long next;
183	unsigned long hmask = huge_page_mask(h);
184	unsigned long sz = huge_page_size(h);
185	pte_t *pte;
186	int err = 0;
187
188	do {
189		next = hugetlb_entry_end(h, addr, end);
190		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
191
192		if (pte)
193			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
194		else if (walk->pte_hole)
195			err = walk->pte_hole(addr, next, walk);
196
197		if (err)
198			break;
199	} while (addr = next, addr != end);
200
201	return err;
202}
203
204#else /* CONFIG_HUGETLB_PAGE */
205static int walk_hugetlb_range(unsigned long addr, unsigned long end,
206			      struct mm_walk *walk)
207{
208	return 0;
209}
210
211#endif /* CONFIG_HUGETLB_PAGE */
212
213/*
214 * Decide whether we really walk over the current vma on [@start, @end)
215 * or skip it via the returned value. Return 0 if we do walk over the
 216 * current vma, and return 1 if we skip the vma. A negative value
 217 * means an error occurred, and the current walk is aborted.
218 */
219static int walk_page_test(unsigned long start, unsigned long end,
220			struct mm_walk *walk)
221{
222	struct vm_area_struct *vma = walk->vma;
223
224	if (walk->test_walk)
225		return walk->test_walk(start, end, walk);
226
227	/*
228	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
229	 * range, so we don't walk over it as we do for normal vmas. However,
 230	 * some callers are interested in handling hole ranges and they don't
231	 * want to just ignore any single address range. Such users certainly
232	 * define their ->pte_hole() callbacks, so let's delegate them to handle
233	 * vma(VM_PFNMAP).
234	 */
235	if (vma->vm_flags & VM_PFNMAP) {
236		int err = 1;
237		if (walk->pte_hole)
238			err = walk->pte_hole(start, end, walk);
239		return err ? err : 1;
240	}
241	return 0;
242}
243
244static int __walk_page_range(unsigned long start, unsigned long end,
245			struct mm_walk *walk)
246{
247	int err = 0;
248	struct vm_area_struct *vma = walk->vma;
249
250	if (vma && is_vm_hugetlb_page(vma)) {
251		if (walk->hugetlb_entry)
252			err = walk_hugetlb_range(start, end, walk);
253	} else
254		err = walk_pgd_range(start, end, walk);
255
256	return err;
257}
258
259/**
260 * walk_page_range - walk page table with caller specific callbacks
261 * @start: start address of the virtual address range
262 * @end: end address of the virtual address range
263 * @walk: mm_walk structure defining the callbacks and the target address space
264 *
265 * Recursively walk the page table tree of the process represented by @walk->mm
266 * within the virtual address range [@start, @end). During walking, we can do
267 * some caller-specific works for each entry, by setting up pmd_entry(),
268 * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
269 * callbacks, the associated entries/pages are just ignored.
270 * The return values of these callbacks are commonly defined like below:
271 *
 272 *  - 0  : succeeded in handling the current entry; if the end address has
 273 *         not been reached yet, continue the walk.
 274 *  - >0 : succeeded in handling the current entry, and return to the caller
 275 *         with a caller-specific value.
 276 *  - <0 : failed to handle the current entry, and return to the caller
 277 *         with an error code.
278 *
 279 * Before starting to walk the page table, some callers want to check whether
280 * they really want to walk over the current vma, typically by checking
281 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
282 * purpose.
283 *
284 * struct mm_walk keeps current values of some common data like vma and pmd,
 285 * which are useful for access from the callbacks. If you want to pass some
286 * caller-specific data to callbacks, @walk->private should be helpful.
287 *
288 * Locking:
289 *   Callers of walk_page_range() and walk_page_vma() should hold
 290 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 291 *   and/or access vma data.
292 */
293int walk_page_range(unsigned long start, unsigned long end,
294		    struct mm_walk *walk)
295{
296	int err = 0;
297	unsigned long next;
298	struct vm_area_struct *vma;
299
300	if (start >= end)
301		return -EINVAL;
302
303	if (!walk->mm)
304		return -EINVAL;
305
306	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
307
308	vma = find_vma(walk->mm, start);
309	do {
310		if (!vma) { /* after the last vma */
311			walk->vma = NULL;
312			next = end;
313		} else if (start < vma->vm_start) { /* outside vma */
314			walk->vma = NULL;
315			next = min(end, vma->vm_start);
316		} else { /* inside vma */
317			walk->vma = vma;
318			next = min(end, vma->vm_end);
319			vma = vma->vm_next;
320
321			err = walk_page_test(start, next, walk);
322			if (err > 0) {
323				/*
324				 * positive return values are purely for
325				 * controlling the pagewalk, so should never
326				 * be passed to the callers.
327				 */
328				err = 0;
329				continue;
330			}
331			if (err < 0)
332				break;
333		}
334		if (walk->vma || walk->pte_hole)
335			err = __walk_page_range(start, next, walk);
336		if (err)
337			break;
338	} while (start = next, start < end);
339	return err;
340}
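
As a concrete illustration of the interface documented above, a caller fills in the callbacks it needs plus ->mm in a struct mm_walk, takes mmap_sem for reading, and calls walk_page_range(). The sketch below is hypothetical: the handler and wrapper names, the present-pte counting purpose and the private counter are assumptions; the callback signature and the locking rule come from the code and kerneldoc above.

#include <linux/mm.h>

/* Illustrative only: count present ptes in [start, end) of a task's mm. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;				/* continue the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk walk = {
		.pte_entry	= count_present_pte,
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);	/* walk_page_range() requires mmap_sem held */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return nr_present;
}
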
341
342int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
343{
344	int err;
345
346	if (!walk->mm)
347		return -EINVAL;
348
349	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
350	VM_BUG_ON(!vma);
351	walk->vma = vma;
352	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
353	if (err > 0)
354		return 0;
355	if (err < 0)
356		return err;
357	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
358}
mm/pagewalk.c at v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/pagewalk.h>
  3#include <linux/highmem.h>
  4#include <linux/sched.h>
  5#include <linux/hugetlb.h>
  6
  7/*
  8 * We want to know the real level where an entry is located, ignoring any
  9 * folding of levels which may be happening. For example, if p4d is folded then
 10 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 11 */
 12static int real_depth(int depth)
 13{
 14	if (depth == 3 && PTRS_PER_PMD == 1)
 15		depth = 2;
 16	if (depth == 2 && PTRS_PER_PUD == 1)
 17		depth = 1;
 18	if (depth == 1 && PTRS_PER_P4D == 1)
 19		depth = 0;
 20	return depth;
 21}
 22
 23static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
 24				unsigned long end, struct mm_walk *walk)
 25{
 26	const struct mm_walk_ops *ops = walk->ops;
 27	int err = 0;
 28
 29	for (;;) {
 30		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
 31		if (err)
 32		       break;
 33		if (addr >= end - PAGE_SIZE)
 34			break;
 35		addr += PAGE_SIZE;
 36		pte++;
 37	}
 38	return err;
 39}
 40
 41static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 42			  struct mm_walk *walk)
 43{
 44	pte_t *pte;
 45	int err = 0;
 46	spinlock_t *ptl;
 47
 48	if (walk->no_vma) {
 49		pte = pte_offset_map(pmd, addr);
 50		err = walk_pte_range_inner(pte, addr, end, walk);
 51		pte_unmap(pte);
 52	} else {
 53		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 54		err = walk_pte_range_inner(pte, addr, end, walk);
 55		pte_unmap_unlock(pte, ptl);
 56	}
 57
 58	return err;
 59}
 60
 61#ifdef CONFIG_ARCH_HAS_HUGEPD
 62static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
 63			     unsigned long end, struct mm_walk *walk, int pdshift)
 64{
 65	int err = 0;
 66	const struct mm_walk_ops *ops = walk->ops;
 67	int shift = hugepd_shift(*phpd);
 68	int page_size = 1 << shift;
 69
 70	if (!ops->pte_entry)
 71		return 0;
 72
 73	if (addr & (page_size - 1))
 74		return 0;
 75
 76	for (;;) {
 77		pte_t *pte;
 78
 79		spin_lock(&walk->mm->page_table_lock);
 80		pte = hugepte_offset(*phpd, addr, pdshift);
 81		err = ops->pte_entry(pte, addr, addr + page_size, walk);
 82		spin_unlock(&walk->mm->page_table_lock);
 83
 84		if (err)
 85			break;
 86		if (addr >= end - page_size)
 87			break;
 88		addr += page_size;
 89	}
 90	return err;
 91}
 92#else
 93static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
 94			     unsigned long end, struct mm_walk *walk, int pdshift)
 95{
 96	return 0;
 97}
 98#endif
 99
100static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
101			  struct mm_walk *walk)
102{
103	pmd_t *pmd;
104	unsigned long next;
105	const struct mm_walk_ops *ops = walk->ops;
106	int err = 0;
107	int depth = real_depth(3);
108
109	pmd = pmd_offset(pud, addr);
110	do {
111again:
112		next = pmd_addr_end(addr, end);
113		if (pmd_none(*pmd)) {
114			if (ops->pte_hole)
115				err = ops->pte_hole(addr, next, depth, walk);
116			if (err)
117				break;
118			continue;
119		}
120
121		walk->action = ACTION_SUBTREE;
122
123		/*
124		 * This implies that each ->pmd_entry() handler
125		 * needs to know about pmd_trans_huge() pmds
126		 */
127		if (ops->pmd_entry)
128			err = ops->pmd_entry(pmd, addr, next, walk);
129		if (err)
130			break;
131
132		if (walk->action == ACTION_AGAIN)
133			goto again;
134
135		/*
136		 * Check this here so we only break down trans_huge
137		 * pages when we _need_ to
138		 */
139		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
140		    walk->action == ACTION_CONTINUE ||
141		    !(ops->pte_entry))
142			continue;
143
144		if (walk->vma) {
145			split_huge_pmd(walk->vma, pmd, addr);
146			if (pmd_trans_unstable(pmd))
147				goto again;
148		}
149
150		if (is_hugepd(__hugepd(pmd_val(*pmd))))
151			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
152		else
153			err = walk_pte_range(pmd, addr, next, walk);
154		if (err)
155			break;
156	} while (pmd++, addr = next, addr != end);
157
158	return err;
159}
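
Compared with v4.17, a ->pmd_entry() handler here can steer the walker through walk->action: ACTION_SUBTREE (the default set above) descends further, ACTION_CONTINUE skips the rest of this entry, and ACTION_AGAIN re-evaluates the same pmd. Below is a minimal handler fragment that consumes huge pmds itself so they are neither split nor walked at pte level; the handler name and the counting purpose are illustrative assumptions, the signature and action values come from the code above.

static int sample_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;

	if (pmd_trans_huge(*pmd)) {
		(*nr_huge)++;
		/* Fully handled here: don't split, don't descend to ptes. */
		walk->action = ACTION_CONTINUE;
	}
	return 0;	/* leaving ACTION_SUBTREE set walks the pte level */
}

A production handler would normally take the pmd lock (for example via pmd_trans_huge_lock()) before inspecting the entry; that is omitted here to keep the control-flow point visible.
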
160
161static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
162			  struct mm_walk *walk)
163{
164	pud_t *pud;
165	unsigned long next;
166	const struct mm_walk_ops *ops = walk->ops;
167	int err = 0;
168	int depth = real_depth(2);
169
170	pud = pud_offset(p4d, addr);
171	do {
172 again:
173		next = pud_addr_end(addr, end);
174		if (pud_none(*pud)) {
175			if (ops->pte_hole)
176				err = ops->pte_hole(addr, next, depth, walk);
177			if (err)
178				break;
179			continue;
180		}
181
182		walk->action = ACTION_SUBTREE;
183
184		if (ops->pud_entry)
185			err = ops->pud_entry(pud, addr, next, walk);
186		if (err)
187			break;
188
189		if (walk->action == ACTION_AGAIN)
190			goto again;
191
192		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
193		    walk->action == ACTION_CONTINUE ||
194		    !(ops->pmd_entry || ops->pte_entry))
195			continue;
196
197		if (walk->vma)
198			split_huge_pud(walk->vma, pud, addr);
199		if (pud_none(*pud))
200			goto again;
201
202		if (is_hugepd(__hugepd(pud_val(*pud))))
203			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
204		else
205			err = walk_pmd_range(pud, addr, next, walk);
206		if (err)
207			break;
208	} while (pud++, addr = next, addr != end);
209
210	return err;
211}
212
213static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
214			  struct mm_walk *walk)
215{
216	p4d_t *p4d;
217	unsigned long next;
218	const struct mm_walk_ops *ops = walk->ops;
219	int err = 0;
220	int depth = real_depth(1);
221
222	p4d = p4d_offset(pgd, addr);
223	do {
224		next = p4d_addr_end(addr, end);
225		if (p4d_none_or_clear_bad(p4d)) {
226			if (ops->pte_hole)
227				err = ops->pte_hole(addr, next, depth, walk);
228			if (err)
229				break;
230			continue;
231		}
232		if (ops->p4d_entry) {
233			err = ops->p4d_entry(p4d, addr, next, walk);
234			if (err)
235				break;
236		}
237		if (is_hugepd(__hugepd(p4d_val(*p4d))))
238			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
239		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
240			err = walk_pud_range(p4d, addr, next, walk);
241		if (err)
242			break;
243	} while (p4d++, addr = next, addr != end);
244
245	return err;
246}
247
248static int walk_pgd_range(unsigned long addr, unsigned long end,
249			  struct mm_walk *walk)
250{
251	pgd_t *pgd;
252	unsigned long next;
253	const struct mm_walk_ops *ops = walk->ops;
254	int err = 0;
255
256	if (walk->pgd)
257		pgd = walk->pgd + pgd_index(addr);
258	else
259		pgd = pgd_offset(walk->mm, addr);
260	do {
261		next = pgd_addr_end(addr, end);
262		if (pgd_none_or_clear_bad(pgd)) {
263			if (ops->pte_hole)
264				err = ops->pte_hole(addr, next, 0, walk);
265			if (err)
266				break;
267			continue;
268		}
269		if (ops->pgd_entry) {
270			err = ops->pgd_entry(pgd, addr, next, walk);
271			if (err)
272				break;
273		}
274		if (is_hugepd(__hugepd(pgd_val(*pgd))))
275			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
276		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
277			err = walk_p4d_range(pgd, addr, next, walk);
278		if (err)
279			break;
280	} while (pgd++, addr = next, addr != end);
281
282	return err;
283}
284
285#ifdef CONFIG_HUGETLB_PAGE
286static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
287				       unsigned long end)
288{
289	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
290	return boundary < end ? boundary : end;
291}
292
293static int walk_hugetlb_range(unsigned long addr, unsigned long end,
294			      struct mm_walk *walk)
295{
296	struct vm_area_struct *vma = walk->vma;
297	struct hstate *h = hstate_vma(vma);
298	unsigned long next;
299	unsigned long hmask = huge_page_mask(h);
300	unsigned long sz = huge_page_size(h);
301	pte_t *pte;
302	const struct mm_walk_ops *ops = walk->ops;
303	int err = 0;
304
305	do {
306		next = hugetlb_entry_end(h, addr, end);
307		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
308
309		if (pte)
310			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
311		else if (ops->pte_hole)
312			err = ops->pte_hole(addr, next, -1, walk);
313
314		if (err)
315			break;
316	} while (addr = next, addr != end);
317
318	return err;
319}
320
321#else /* CONFIG_HUGETLB_PAGE */
322static int walk_hugetlb_range(unsigned long addr, unsigned long end,
323			      struct mm_walk *walk)
324{
325	return 0;
326}
327
328#endif /* CONFIG_HUGETLB_PAGE */
329
330/*
331 * Decide whether we really walk over the current vma on [@start, @end)
332 * or skip it via the returned value. Return 0 if we do walk over the
 333 * current vma, and return 1 if we skip the vma. A negative value
 334 * means an error occurred, and the current walk is aborted.
335 */
336static int walk_page_test(unsigned long start, unsigned long end,
337			struct mm_walk *walk)
338{
339	struct vm_area_struct *vma = walk->vma;
340	const struct mm_walk_ops *ops = walk->ops;
341
342	if (ops->test_walk)
343		return ops->test_walk(start, end, walk);
344
345	/*
346	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
347	 * range, so we don't walk over it as we do for normal vmas. However,
 348	 * some callers are interested in handling hole ranges and they don't
349	 * want to just ignore any single address range. Such users certainly
350	 * define their ->pte_hole() callbacks, so let's delegate them to handle
351	 * vma(VM_PFNMAP).
352	 */
353	if (vma->vm_flags & VM_PFNMAP) {
354		int err = 1;
355		if (ops->pte_hole)
356			err = ops->pte_hole(start, end, -1, walk);
357		return err ? err : 1;
358	}
359	return 0;
360}
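
The contract described in the comment above (return 0 to walk the vma, 1 to skip it, negative to abort) makes ->test_walk() a per-vma filter. A minimal sketch that restricts a walk to anonymous mappings follows; the callback name is an illustrative assumption, the signature matches the call above.

static int anon_only_test_walk(unsigned long start, unsigned long end,
			       struct mm_walk *walk)
{
	if (vma_is_anonymous(walk->vma))
		return 0;	/* walk this vma */
	return 1;		/* skip it and move on to the next vma */
}
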
361
362static int __walk_page_range(unsigned long start, unsigned long end,
363			struct mm_walk *walk)
364{
365	int err = 0;
366	struct vm_area_struct *vma = walk->vma;
367	const struct mm_walk_ops *ops = walk->ops;
368
369	if (ops->pre_vma) {
370		err = ops->pre_vma(start, end, walk);
371		if (err)
372			return err;
373	}
374
375	if (is_vm_hugetlb_page(vma)) {
376		if (ops->hugetlb_entry)
377			err = walk_hugetlb_range(start, end, walk);
378	} else
379		err = walk_pgd_range(start, end, walk);
380
381	if (ops->post_vma)
382		ops->post_vma(walk);
383
384	return err;
385}
386
387/**
388 * walk_page_range - walk page table with caller specific callbacks
389 * @mm:		mm_struct representing the target process of page table walk
390 * @start:	start address of the virtual address range
391 * @end:	end address of the virtual address range
392 * @ops:	operation to call during the walk
393 * @private:	private data for callbacks' usage
394 *
395 * Recursively walk the page table tree of the process represented by @mm
396 * within the virtual address range [@start, @end). During walking, we can do
397 * some caller-specific works for each entry, by setting up pmd_entry(),
398 * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
399 * callbacks, the associated entries/pages are just ignored.
400 * The return values of these callbacks are commonly defined like below:
401 *
 402 *  - 0  : succeeded in handling the current entry; if the end address has
 403 *         not been reached yet, continue the walk.
 404 *  - >0 : succeeded in handling the current entry, and return to the caller
 405 *         with a caller-specific value.
 406 *  - <0 : failed to handle the current entry, and return to the caller
 407 *         with an error code.
408 *
 409 * Before starting to walk the page table, some callers want to check whether
410 * they really want to walk over the current vma, typically by checking
411 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
412 * purpose.
413 *
414 * If operations need to be staged before and committed after a vma is walked,
415 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
416 * since it is intended to handle commit-type operations, can't return any
417 * errors.
418 *
419 * struct mm_walk keeps current values of some common data like vma and pmd,
 420 * which are useful for access from the callbacks. If you want to pass some
421 * caller-specific data to callbacks, @private should be helpful.
422 *
423 * Locking:
424 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 425 *   because these functions traverse the vma list and/or access vma data.
426 */
427int walk_page_range(struct mm_struct *mm, unsigned long start,
428		unsigned long end, const struct mm_walk_ops *ops,
429		void *private)
430{
431	int err = 0;
432	unsigned long next;
433	struct vm_area_struct *vma;
434	struct mm_walk walk = {
435		.ops		= ops,
436		.mm		= mm,
437		.private	= private,
438	};
439
440	if (start >= end)
441		return -EINVAL;
442
443	if (!walk.mm)
444		return -EINVAL;
445
446	mmap_assert_locked(walk.mm);
447
448	vma = find_vma(walk.mm, start);
449	do {
450		if (!vma) { /* after the last vma */
451			walk.vma = NULL;
452			next = end;
453			if (ops->pte_hole)
454				err = ops->pte_hole(start, next, -1, &walk);
455		} else if (start < vma->vm_start) { /* outside vma */
456			walk.vma = NULL;
457			next = min(end, vma->vm_start);
458			if (ops->pte_hole)
459				err = ops->pte_hole(start, next, -1, &walk);
460		} else { /* inside vma */
461			walk.vma = vma;
462			next = min(end, vma->vm_end);
463			vma = find_vma(mm, vma->vm_end);
464
465			err = walk_page_test(start, next, &walk);
466			if (err > 0) {
467				/*
468				 * positive return values are purely for
469				 * controlling the pagewalk, so should never
470				 * be passed to the callers.
471				 */
472				err = 0;
473				continue;
474			}
475			if (err < 0)
476				break;
477			err = __walk_page_range(start, next, &walk);
478		}
479		if (err)
480			break;
481	} while (start = next, start < end);
482	return err;
483}
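
This is the same present-pte counter sketched for v4.17 earlier, redone against this interface to show the main API change: the callbacks now live in a const struct mm_walk_ops, walk_page_range() takes @mm explicitly, and the caller holds mmap_lock for reading via mmap_read_lock(). The names and the counting purpose remain illustrative assumptions.

#include <linux/pagewalk.h>

/* Illustrative only: count present ptes with the mm_walk_ops interface. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;
}

static const struct mm_walk_ops count_present_ops = {
	.pte_entry	= count_present_pte,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long nr_present = 0;

	mmap_read_lock(mm);	/* mmap_assert_locked() above expects this */
	walk_page_range(mm, start, end, &count_present_ops, &nr_present);
	mmap_read_unlock(mm);

	return nr_present;
}
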
484
485/**
486 * walk_page_range_novma - walk a range of pagetables not backed by a vma
487 * @mm:		mm_struct representing the target process of page table walk
488 * @start:	start address of the virtual address range
489 * @end:	end address of the virtual address range
490 * @ops:	operation to call during the walk
491 * @pgd:	pgd to walk if different from mm->pgd
492 * @private:	private data for callbacks' usage
493 *
494 * Similar to walk_page_range() but can walk any page tables even if they are
 495 * not backed by VMAs. Because 'unusual' entries may be walked, this function
 496 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 497 * walking the kernel page tables or page tables for firmware.
498 */
499int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
500			  unsigned long end, const struct mm_walk_ops *ops,
501			  pgd_t *pgd,
502			  void *private)
503{
504	struct mm_walk walk = {
505		.ops		= ops,
506		.mm		= mm,
507		.pgd		= pgd,
508		.private	= private,
509		.no_vma		= true
510	};
511
512	if (start >= end || !walk.mm)
513		return -EINVAL;
514
515	mmap_assert_write_locked(walk.mm);
516
517	return walk_pgd_range(start, end, &walk);
518}
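
Below is a hedged sketch of the no-VMA variant, for example to inspect a kernel virtual range through init_mm. Note that the function above asserts the mmap lock is held for writing and, per its kerneldoc, does not take the pte lock for ->pte_entry(). Which ranges are safe to walk is architecture-specific, and all names below are illustrative assumptions.

#include <linux/pagewalk.h>

static int kernel_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	/* ptes are unlocked in the no_vma case; treat them as a snapshot */
	return 0;
}

static const struct mm_walk_ops kernel_walk_ops = {
	.pte_entry	= kernel_pte_entry,
};

static void walk_kernel_range(unsigned long start, unsigned long end)
{
	mmap_write_lock(&init_mm);	/* mmap_assert_write_locked() above */
	walk_page_range_novma(&init_mm, start, end, &kernel_walk_ops,
			      NULL /* NULL pgd: use init_mm's page table */,
			      NULL /* no private data */);
	mmap_write_unlock(&init_mm);
}
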
519
520int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
521			unsigned long end, const struct mm_walk_ops *ops,
522			void *private)
523{
524	struct mm_walk walk = {
525		.ops		= ops,
526		.mm		= vma->vm_mm,
527		.vma		= vma,
528		.private	= private,
529	};
530
531	if (start >= end || !walk.mm)
532		return -EINVAL;
533	if (start < vma->vm_start || end > vma->vm_end)
534		return -EINVAL;
535
536	mmap_assert_locked(walk.mm);
537	return __walk_page_range(start, end, &walk);
538}
539
540int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
541		void *private)
542{
543	struct mm_walk walk = {
544		.ops		= ops,
545		.mm		= vma->vm_mm,
546		.vma		= vma,
547		.private	= private,
548	};
549
550	if (!walk.mm)
551		return -EINVAL;
552
553	mmap_assert_locked(walk.mm);
554	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
555}
556
557/**
558 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
559 * @mapping: Pointer to the struct address_space
560 * @first_index: First page offset in the address_space
561 * @nr: Number of incremental page offsets to cover
562 * @ops:	operation to call during the walk
563 * @private:	private data for callbacks' usage
564 *
565 * This function walks all memory areas mapped into a struct address_space.
566 * The walk is limited to only the given page-size index range, but if
567 * the index boundaries cross a huge page-table entry, that entry will be
568 * included.
569 *
570 * Also see walk_page_range() for additional information.
571 *
572 * Locking:
573 *   This function can't require that the struct mm_struct::mmap_lock is held,
574 *   since @mapping may be mapped by multiple processes. Instead
575 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 576 *   callbacks, and it's up to the caller to ensure that the
577 *   struct mm_struct::mmap_lock is not needed.
578 *
 579 *   This also means that a caller can't rely on the struct
580 *   vm_area_struct::vm_flags to be constant across a call,
581 *   except for immutable flags. Callers requiring this shouldn't use
582 *   this function.
583 *
584 * Return: 0 on success, negative error code on failure, positive number on
585 * caller defined premature termination.
586 */
587int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
588		      pgoff_t nr, const struct mm_walk_ops *ops,
589		      void *private)
590{
591	struct mm_walk walk = {
592		.ops		= ops,
593		.private	= private,
594	};
595	struct vm_area_struct *vma;
596	pgoff_t vba, vea, cba, cea;
597	unsigned long start_addr, end_addr;
598	int err = 0;
599
600	lockdep_assert_held(&mapping->i_mmap_rwsem);
601	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
602				  first_index + nr - 1) {
603		/* Clip to the vma */
604		vba = vma->vm_pgoff;
605		vea = vba + vma_pages(vma);
606		cba = first_index;
607		cba = max(cba, vba);
608		cea = first_index + nr;
609		cea = min(cea, vea);
610
611		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
612		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
613		if (start_addr >= end_addr)
614			continue;
615
616		walk.vma = vma;
617		walk.mm = vma->vm_mm;
618
619		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
620		if (err > 0) {
621			err = 0;
622			break;
623		} else if (err < 0)
624			break;
625
626		err = __walk_page_range(start_addr, end_addr, &walk);
627		if (err)
628			break;
629	}
630
631	return err;
632}
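
Finally, a hedged usage sketch for walk_page_mapping(): per the locking note in its kerneldoc, the caller serializes on @mapping->i_mmap_rwsem rather than on any mmap_lock, here taken with i_mmap_lock_read() as one plausible choice; the handler and wrapper names are illustrative assumptions.

#include <linux/fs.h>
#include <linux/pagewalk.h>

/* Illustrative only: visit every pte that maps a range of a file. */
static int file_pte_entry(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	/* inspect or update the pte mapping this file page here */
	return 0;
}

static const struct mm_walk_ops file_walk_ops = {
	.pte_entry	= file_pte_entry,
};

static int walk_file_mappings(struct address_space *mapping,
			      pgoff_t first_index, pgoff_t nr)
{
	int err;

	i_mmap_lock_read(mapping);	/* satisfies the lockdep assertion above */
	err = walk_page_mapping(mapping, first_index, nr,
				&file_walk_ops, NULL);
	i_mmap_unlock_read(mapping);

	return err;
}
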