mm/page_vma_mapped.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
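
/*
 * Editorial note: on a true return, map_pte() leaves pvmw->pte mapped and
 * pvmw->ptl held; on a false return, pvmw->pte is still mapped but the lock
 * has not been taken, and the caller skips ahead to the next pte.
 */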

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
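
/*
 * Editorial example: for a 2MB THP on x86-64 (512 subpages) whose head page
 * has pfn 0x1000, any pfn in the range [0x1000, 0x1200) matches.
 */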

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, which includes the pte and page pair
 * to be checked
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
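
/*
 * Editorial example: step_forward() advances to the start of the next
 * size-aligned region, e.g. address 0x2100123 with size PMD_SIZE (assuming
 * a 2MB PMD, 0x200000) becomes 0x2200000. If the addition wraps past the
 * top of the address space, the address is pinned to ULONG_MAX so that the
 * caller's "while (pvmw->address < end)" loop terminates.
 */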

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    pfn_swap_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
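
/*
 * Editorial sketch, not part of this file: per the kernel-doc above, a
 * PTE-mapped THP is found one pte at a time, so callers loop until the walk
 * returns false. Everything below except the pvmw API itself is a
 * hypothetical illustration.
 */
static void example_visit_mappings(struct page *page,
				   struct vm_area_struct *vma,
				   unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* One pte mapping the page; pvmw.ptl is held here. */
		} else {
			/* PMD-mapped THP: pvmw.pmd is set, pvmw.pte is NULL. */
		}
		/* Stop early with page_vma_mapped_walk_done(&pvmw) + break. */
	}
}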

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
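
/*
 * Editorial sketch: a hypothetical caller only needs a yes/no answer and a
 * stable page/vma pair; the PVMW_SYNC walk takes the page table locks
 * internally:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		pr_debug("pfn %lx mapped in this vma\n", page_to_pfn(page));
 */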
mm/page_vma_mapped.c (v5.9)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, which includes the pte and page pair
 * to be checked
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seeking to the next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					thp_size(pvmw->page))
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}