v5.4 (mm/page_vma_mapped.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry, nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in page maps count).
			 *
			 * So handle this special case as if it was a normal
			 * page mapping ie lock CPU page table and return
			 * true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
	unsigned long hpage_pfn = page_to_pfn(hpage);

	/* THP can be referenced by any subpage */
	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
 * page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, return false.
 *
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_in_hpage(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
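
For orientation, the sketch below shows how an rmap-style caller typically drives this v5.4 interface: set up the walk state once, then call page_vma_mapped_walk() in a loop, handling either a PTE-level or a PMD-level mapping on each iteration. It is illustrative only; the function name, the flags choice and the per-mapping work are assumptions, not part of this file.

/*
 * Illustrative sketch only (not part of mm/page_vma_mapped.c): a caller
 * visiting every place @page is mapped inside one VMA, in the style of
 * the rmap walkers. The function name and the loop body are hypothetical.
 */
static void example_visit_mappings(struct page *page,
				   struct vm_area_struct *vma,
				   unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = 0,	/* or PVMW_SYNC / PVMW_MIGRATION as needed */
	};

	/* One iteration per mapping; several for a PTE-mapped THP */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* PTE-level mapping: pvmw.pte is valid, pvmw.ptl is held */
		} else {
			/* PMD-mapped THP: pvmw.pmd is set, pvmw.pte is NULL */
		}
		/*
		 * To stop early, call page_vma_mapped_walk_done(&pvmw) and
		 * break; a false return from the walk has already dropped
		 * the lock and unmapped the PTE.
		 */
	}
}
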
v6.13.7 (mm/page_vma_mapped.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
		    spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

again:
	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					     pvmw->address, pmdvalp, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry, nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in page maps count).
		 *
		 * So handle this special case as if it was a normal
		 * page mapping ie lock CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	spin_lock(*ptlp);
	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
		pte_unmap_unlock(pvmw->pte, *ptlp);
		goto again;
	}
	pvmw->ptl = *ptlp;

	return true;
}

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
 * for checking
 *
 * page_vma_mapped_walk() found a place where pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * Otherwise, return false.
 *
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem.  Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &pmde, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			spin_lock(ptl);
			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
				pte_unmap_unlock(pvmw->pte, ptl);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->ptl = ptl;
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table.  If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma)
{
	const struct folio *folio = page_folio(page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
	if (pvmw.address == -EFAULT)
		goto out;
	if (!page_vma_mapped_walk(&pvmw))
		return -EFAULT;
	page_vma_mapped_walk_done(&pvmw);
out:
	return pvmw.address;
}
#endif
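
In v6.13.7 the walk is keyed by a pfn range (pfn, nr_pages) instead of a struct page, so callers normally describe a folio. The sketch below shows a typical caller under that interface, assuming the DEFINE_FOLIO_VMA_WALK() initialiser from include/linux/rmap.h; the function name and the per-mapping work are illustrative, not part of this file.

/*
 * Illustrative sketch only (not part of mm/page_vma_mapped.c): visiting
 * every place @folio is mapped inside one VMA with the pfn/nr_pages
 * based walk. Assumes DEFINE_FOLIO_VMA_WALK() from include/linux/rmap.h;
 * the function name and the work done per mapping are hypothetical.
 */
static void example_visit_folio_mappings(struct folio *folio,
					 struct vm_area_struct *vma,
					 unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	/* One iteration per mapping; several for a PTE-mapped large folio */
	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/* PMD-mapped THP: pvmw.pmd is set, pmd lock held in pvmw.ptl */
			continue;
		}
		/* PTE-level mapping: pvmw.pte is mapped and pvmw.ptl is held */
	}
	/* A false return has already unlocked and unmapped everything */
}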