v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/rmap.h>
  4#include <linux/hugetlb.h>
  5#include <linux/swap.h>
  6#include <linux/swapops.h>
  7
  8#include "internal.h"
  9
 10static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 11{
 12	page_vma_mapped_walk_done(pvmw);
 13	return false;
 14}
 15
 16static bool map_pte(struct page_vma_mapped_walk *pvmw)
 17{
 18	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
 19	if (!(pvmw->flags & PVMW_SYNC)) {
 20		if (pvmw->flags & PVMW_MIGRATION) {
 21			if (!is_swap_pte(*pvmw->pte))
 22				return false;
 23		} else {
 24			/*
 25			 * We get here when we are trying to unmap a private
 26			 * device page from the process address space. Such
 27			 * page is not CPU accessible and thus is mapped as
 28			 * a special swap entry, nonetheless it still does
 29			 * count as a valid regular mapping for the page (and
 30			 * is accounted as such in page maps count).
 31			 *
 32			 * So handle this special case as if it were a normal
 33			 * page mapping, i.e. lock the CPU page table and return
 34			 * true.
 35			 *
 36			 * For more details on device private memory see HMM
 37			 * (include/linux/hmm.h or mm/hmm.c).
 38			 */
 39			if (is_swap_pte(*pvmw->pte)) {
 40				swp_entry_t entry;
 41
 42				/* Handle un-addressable ZONE_DEVICE memory */
 43				entry = pte_to_swp_entry(*pvmw->pte);
 44				if (!is_device_private_entry(entry) &&
 45				    !is_device_exclusive_entry(entry))
 46					return false;
 47			} else if (!pte_present(*pvmw->pte))
 48				return false;
 49		}
 50	}
 51	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
 52	spin_lock(pvmw->ptl);
 53	return true;
 54}
 55
 56static inline bool pfn_is_match(struct page *page, unsigned long pfn)
 57{
 58	unsigned long page_pfn = page_to_pfn(page);
 59
 60	/* normal page and hugetlbfs page */
 61	if (!PageTransCompound(page) || PageHuge(page))
 62		return page_pfn == pfn;
 63
 64	/* THP can be referenced by any subpage */
 65	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
 66}
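/*
 * Worked example (illustration, assuming 4KB base pages and a 2MB PMD-sized
 * THP): for a THP whose head page sits at pfn 0x1000, thp_nr_pages() is 512,
 * so any pfn in 0x1000..0x11ff matches above, while a non-compound page only
 * matches its own pfn.
 */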
 67
 68/**
 69 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 71 * @pvmw: page_vma_mapped_walk struct, includes the pte and page to check
 71 *
 72 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 73 * mapped. check_pte() has to validate this.
 74 *
 75 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 76 * arbitrary page.
 77 *
 78 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 79 * entry that points to @pvmw->page or any subpage in case of THP.
 80 *
 81 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 82 * pvmw->page or any subpage in case of THP.
 83 *
 84 * Otherwise, return false.
 85 *
 86 */
 87static bool check_pte(struct page_vma_mapped_walk *pvmw)
 88{
 89	unsigned long pfn;
 90
 91	if (pvmw->flags & PVMW_MIGRATION) {
 92		swp_entry_t entry;
 93		if (!is_swap_pte(*pvmw->pte))
 94			return false;
 95		entry = pte_to_swp_entry(*pvmw->pte);
 96
 97		if (!is_migration_entry(entry) &&
 98		    !is_device_exclusive_entry(entry))
 99			return false;
100
101		pfn = swp_offset(entry);
102	} else if (is_swap_pte(*pvmw->pte)) {
103		swp_entry_t entry;
104
105		/* Handle un-addressable ZONE_DEVICE memory */
106		entry = pte_to_swp_entry(*pvmw->pte);
107		if (!is_device_private_entry(entry) &&
108		    !is_device_exclusive_entry(entry))
109			return false;
110
111		pfn = swp_offset(entry);
112	} else {
113		if (!pte_present(*pvmw->pte))
114			return false;
115
116		pfn = pte_pfn(*pvmw->pte);
117	}
118
119	return pfn_is_match(pvmw->page, pfn);
120}
121
122static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
123{
124	pvmw->address = (pvmw->address + size) & ~(size - 1);
125	if (!pvmw->address)
126		pvmw->address = ULONG_MAX;
127}
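/*
 * Worked example (illustration, assuming x86_64 with 4KB pages, where
 * PMD_SIZE == 0x200000): step_forward(pvmw, PMD_SIZE) with
 * pvmw->address == 0x12345000 yields (0x12345000 + 0x200000) & ~0x1fffff
 * == 0x12400000, i.e. the start of the next PMD-sized region.  If the
 * addition wraps to 0 at the top of the address space, the address is set
 * to ULONG_MAX so the caller's "address < end" loop terminates.
 */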
128
129/**
130 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
131 * @pvmw->address
132 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
133 * must be set. pmd, pte and ptl must be NULL.
134 *
135 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
136 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
137 * adjusted if needed (for PTE-mapped THPs).
138 *
139 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
140 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
141 * a loop to find all PTEs that map the THP.
142 *
143 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
144 * regardless of which page table level the page is mapped at. @pvmw->pmd is
145 * NULL.
146 *
147 * Returns false if there are no more page table entries for the page in
148 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
149 *
150 * If you need to stop the walk before page_vma_mapped_walk() returned false,
151 * use page_vma_mapped_walk_done(). It will do the housekeeping.
152 */
153bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
154{
155	struct mm_struct *mm = pvmw->vma->vm_mm;
156	struct page *page = pvmw->page;
157	unsigned long end;
158	pgd_t *pgd;
159	p4d_t *p4d;
160	pud_t *pud;
161	pmd_t pmde;
162
163	/* The only possible pmd mapping has been handled on last iteration */
164	if (pvmw->pmd && !pvmw->pte)
165		return not_found(pvmw);
166
167	if (unlikely(PageHuge(page))) {
168		/* The only possible mapping was handled on last iteration */
169		if (pvmw->pte)
170			return not_found(pvmw);
171
172		/* when pud is not present, pte will be NULL */
173		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
174		if (!pvmw->pte)
175			return false;
176
177		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
178		spin_lock(pvmw->ptl);
179		if (!check_pte(pvmw))
180			return not_found(pvmw);
181		return true;
182	}
183
184	/*
185	 * Seek to next pte only makes sense for THP.
186	 * But more important than that optimization, is to filter out
187	 * any PageKsm page: whose page->index misleads vma_address()
188	 * and vma_address_end() to disaster.
189	 */
190	end = PageTransCompound(page) ?
191		vma_address_end(page, pvmw->vma) :
192		pvmw->address + PAGE_SIZE;
193	if (pvmw->pte)
194		goto next_pte;
195restart:
196	do {
197		pgd = pgd_offset(mm, pvmw->address);
198		if (!pgd_present(*pgd)) {
199			step_forward(pvmw, PGDIR_SIZE);
200			continue;
201		}
202		p4d = p4d_offset(pgd, pvmw->address);
203		if (!p4d_present(*p4d)) {
204			step_forward(pvmw, P4D_SIZE);
205			continue;
206		}
207		pud = pud_offset(p4d, pvmw->address);
208		if (!pud_present(*pud)) {
209			step_forward(pvmw, PUD_SIZE);
210			continue;
211		}
212
213		pvmw->pmd = pmd_offset(pud, pvmw->address);
214		/*
215		 * Make sure the pmd value isn't cached in a register by the
216		 * compiler and used as a stale value after we've observed a
217		 * subsequent update.
218		 */
219		pmde = READ_ONCE(*pvmw->pmd);
220
221		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
222			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
223			pmde = *pvmw->pmd;
224			if (likely(pmd_trans_huge(pmde))) {
225				if (pvmw->flags & PVMW_MIGRATION)
226					return not_found(pvmw);
227				if (pmd_page(pmde) != page)
228					return not_found(pvmw);
229				return true;
230			}
231			if (!pmd_present(pmde)) {
232				swp_entry_t entry;
233
234				if (!thp_migration_supported() ||
235				    !(pvmw->flags & PVMW_MIGRATION))
236					return not_found(pvmw);
237				entry = pmd_to_swp_entry(pmde);
238				if (!is_migration_entry(entry) ||
239				    pfn_swap_entry_to_page(entry) != page)
240					return not_found(pvmw);
241				return true;
242			}
243			/* THP pmd was split under us: handle on pte level */
244			spin_unlock(pvmw->ptl);
245			pvmw->ptl = NULL;
246		} else if (!pmd_present(pmde)) {
247			/*
248			 * If PVMW_SYNC, take and drop THP pmd lock so that we
249			 * cannot return prematurely, while zap_huge_pmd() has
250			 * cleared *pmd but not decremented compound_mapcount().
251			 */
252			if ((pvmw->flags & PVMW_SYNC) &&
253			    PageTransCompound(page)) {
254				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
255
256				spin_unlock(ptl);
257			}
258			step_forward(pvmw, PMD_SIZE);
259			continue;
260		}
261		if (!map_pte(pvmw))
262			goto next_pte;
263this_pte:
264		if (check_pte(pvmw))
265			return true;
266next_pte:
267		do {
268			pvmw->address += PAGE_SIZE;
269			if (pvmw->address >= end)
270				return not_found(pvmw);
271			/* Did we cross page table boundary? */
272			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
273				if (pvmw->ptl) {
274					spin_unlock(pvmw->ptl);
275					pvmw->ptl = NULL;
276				}
277				pte_unmap(pvmw->pte);
278				pvmw->pte = NULL;
279				goto restart;
280			}
281			pvmw->pte++;
282			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
283				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
284				spin_lock(pvmw->ptl);
285			}
286		} while (pte_none(*pvmw->pte));
287
288		if (!pvmw->ptl) {
289			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
290			spin_lock(pvmw->ptl);
291		}
292		goto this_pte;
293	} while (pvmw->address < end);
294
295	return false;
296}
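/*
 * Example caller (a minimal sketch in the style of the mm/rmap.c users of
 * this walk; process_pte_mapping() and process_pmd_mapping() are hypothetical
 * helpers, not kernel functions):
 */
static void walk_page_mappings(struct page *page, struct vm_area_struct *vma,
			       unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	/* A PTE-mapped THP can be mapped by several ptes: keep iterating. */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte)
			process_pte_mapping(&pvmw);	/* pvmw.pte valid, pvmw.ptl held */
		else
			process_pmd_mapping(&pvmw);	/* PMD-mapped THP, pvmw.pmd valid */
		/*
		 * To stop early, call page_vma_mapped_walk_done(&pvmw) and
		 * break; otherwise the walk drops the lock and mapping by
		 * itself once it returns false.
		 */
	}
}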
297
298/**
299 * page_mapped_in_vma - check whether a page is really mapped in a VMA
300 * @page: the page to test
301 * @vma: the VMA to test
302 *
303 * Returns 1 if the page is mapped into the page tables of the VMA, 0
304 * if the page is not mapped into the page tables of this VMA.  Only
305 * valid for normal file or anonymous VMAs.
306 */
307int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
308{
309	struct page_vma_mapped_walk pvmw = {
310		.page = page,
311		.vma = vma,
312		.flags = PVMW_SYNC,
313	};
314
315	pvmw.address = vma_address(page, vma);
316	if (pvmw.address == -EFAULT)
317		return 0;
318	if (!page_vma_mapped_walk(&pvmw))
319		return 0;
320	page_vma_mapped_walk_done(&pvmw);
321	return 1;
322}
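/*
 * Example use (a minimal sketch, modelled on how the hwpoison code in
 * mm/memory-failure.c decides whether a vma is affected):
 *
 *	if (!page_mapped_in_vma(page, vma))
 *		continue;	// not mapped here, skip this vma
 */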
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/rmap.h>
  4#include <linux/hugetlb.h>
  5#include <linux/swap.h>
  6#include <linux/swapops.h>
  7
  8#include "internal.h"
  9
 10static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 11{
 12	page_vma_mapped_walk_done(pvmw);
 13	return false;
 14}
 15
 16static bool map_pte(struct page_vma_mapped_walk *pvmw)
 17{
 18	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
 19	if (!(pvmw->flags & PVMW_SYNC)) {
 20		if (pvmw->flags & PVMW_MIGRATION) {
 21			if (!is_swap_pte(*pvmw->pte))
 22				return false;
 23		} else {
 24			/*
 25			 * We get here when we are trying to unmap a private
 26			 * device page from the process address space. Such
 27			 * page is not CPU accessible and thus is mapped as
 28			 * a special swap entry, nonetheless it still does
 29			 * count as a valid regular mapping for the page (and
 30			 * is accounted as such in page maps count).
 31			 *
 32			 * So handle this special case as if it were a normal
 33			 * page mapping, i.e. lock the CPU page table and return
 34			 * true.
 35			 *
 36			 * For more details on device private memory see HMM
 37			 * (include/linux/hmm.h or mm/hmm.c).
 38			 */
 39			if (is_swap_pte(*pvmw->pte)) {
 40				swp_entry_t entry;
 41
 42				/* Handle un-addressable ZONE_DEVICE memory */
 43				entry = pte_to_swp_entry(*pvmw->pte);
 44				if (!is_device_private_entry(entry) &&
 45				    !is_device_exclusive_entry(entry))
 46					return false;
 47			} else if (!pte_present(*pvmw->pte))
 48				return false;
 49		}
 50	}
 51	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
 52	spin_lock(pvmw->ptl);
 53	return true;
 54}
 55
 56/**
 57 * check_pte - check if the page range of @pvmw is mapped at the @pvmw->pte
 58 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range to check
 59 *
 60 * page_vma_mapped_walk() found a place where the page range is *potentially*
 61 * mapped. check_pte() has to validate this.
 62 *
 63 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 64 * arbitrary page.
 65 *
 66 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 67 * migration entry that points into @pvmw->pfn .. @pvmw->pfn + @pvmw->nr_pages - 1.
 68 *
 69 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to a
 70 * page in that pfn range.
 71 *
 72 * Otherwise, return false.
 73 *
 74 */
 75static bool check_pte(struct page_vma_mapped_walk *pvmw)
 76{
 77	unsigned long pfn;
 78
 79	if (pvmw->flags & PVMW_MIGRATION) {
 80		swp_entry_t entry;
 81		if (!is_swap_pte(*pvmw->pte))
 82			return false;
 83		entry = pte_to_swp_entry(*pvmw->pte);
 84
 85		if (!is_migration_entry(entry) &&
 86		    !is_device_exclusive_entry(entry))
 87			return false;
 88
 89		pfn = swp_offset_pfn(entry);
 90	} else if (is_swap_pte(*pvmw->pte)) {
 91		swp_entry_t entry;
 92
 93		/* Handle un-addressable ZONE_DEVICE memory */
 94		entry = pte_to_swp_entry(*pvmw->pte);
 95		if (!is_device_private_entry(entry) &&
 96		    !is_device_exclusive_entry(entry))
 97			return false;
 98
 99		pfn = swp_offset_pfn(entry);
100	} else {
101		if (!pte_present(*pvmw->pte))
102			return false;
103
104		pfn = pte_pfn(*pvmw->pte);
105	}
106
107	return (pfn - pvmw->pfn) < pvmw->nr_pages;
108}
109
110/* Returns true if the two ranges overlap.  Careful to not overflow. */
111static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
112{
113	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
114		return false;
115	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
116		return false;
117	return true;
118}
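/*
 * Worked example (illustration, assuming HPAGE_PMD_NR == 512): a huge pmd
 * covering pfns 0x1000..0x11ff overlaps a target range with pvmw->pfn ==
 * 0x11f0 and nr_pages == 64 (i.e. 0x11f0..0x122f), because neither range
 * ends before the other begins, which is exactly what the two checks above
 * test.
 */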
119
120static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
121{
122	pvmw->address = (pvmw->address + size) & ~(size - 1);
123	if (!pvmw->address)
124		pvmw->address = ULONG_MAX;
125}
126
127/**
128 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
129 * @pvmw->address
130 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
131 * and flags must be set. pmd, pte and ptl must be NULL.
132 *
133 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
134 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
135 * adjusted if needed (for PTE-mapped THPs).
136 *
137 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
138 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
139 * a loop to find all PTEs that map the THP.
140 *
141 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
142 * regardless of which page table level the page is mapped at. @pvmw->pmd is
143 * NULL.
144 *
145 * Returns false if there are no more page table entries for the page in
146 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
147 *
148 * If you need to stop the walk before page_vma_mapped_walk() returned false,
149 * use page_vma_mapped_walk_done(). It will do the housekeeping.
150 */
151bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
152{
153	struct vm_area_struct *vma = pvmw->vma;
154	struct mm_struct *mm = vma->vm_mm;
155	unsigned long end;
156	pgd_t *pgd;
157	p4d_t *p4d;
158	pud_t *pud;
159	pmd_t pmde;
160
161	/* The only possible pmd mapping has been handled on last iteration */
162	if (pvmw->pmd && !pvmw->pte)
163		return not_found(pvmw);
164
165	if (unlikely(is_vm_hugetlb_page(vma))) {
166		struct hstate *hstate = hstate_vma(vma);
167		unsigned long size = huge_page_size(hstate);
168		/* The only possible mapping was handled on last iteration */
169		if (pvmw->pte)
170			return not_found(pvmw);
171
172		/* when pud is not present, pte will be NULL */
173		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
174		if (!pvmw->pte)
175			return false;
176
177		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
178		if (!check_pte(pvmw))
179			return not_found(pvmw);
180		return true;
181	}
182
183	end = vma_address_end(pvmw);
184	if (pvmw->pte)
185		goto next_pte;
186restart:
187	do {
188		pgd = pgd_offset(mm, pvmw->address);
189		if (!pgd_present(*pgd)) {
190			step_forward(pvmw, PGDIR_SIZE);
191			continue;
192		}
193		p4d = p4d_offset(pgd, pvmw->address);
194		if (!p4d_present(*p4d)) {
195			step_forward(pvmw, P4D_SIZE);
196			continue;
197		}
198		pud = pud_offset(p4d, pvmw->address);
199		if (!pud_present(*pud)) {
200			step_forward(pvmw, PUD_SIZE);
201			continue;
202		}
203
204		pvmw->pmd = pmd_offset(pud, pvmw->address);
205		/*
206		 * Make sure the pmd value isn't cached in a register by the
207		 * compiler and used as a stale value after we've observed a
208		 * subsequent update.
209		 */
210		pmde = READ_ONCE(*pvmw->pmd);
211
212		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
213		    (pmd_present(pmde) && pmd_devmap(pmde))) {
214			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
215			pmde = *pvmw->pmd;
216			if (!pmd_present(pmde)) {
217				swp_entry_t entry;
218
219				if (!thp_migration_supported() ||
220				    !(pvmw->flags & PVMW_MIGRATION))
221					return not_found(pvmw);
222				entry = pmd_to_swp_entry(pmde);
223				if (!is_migration_entry(entry) ||
224				    !check_pmd(swp_offset_pfn(entry), pvmw))
225					return not_found(pvmw);
226				return true;
227			}
228			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
229				if (pvmw->flags & PVMW_MIGRATION)
230					return not_found(pvmw);
231				if (!check_pmd(pmd_pfn(pmde), pvmw))
232					return not_found(pvmw);
233				return true;
234			}
235			/* THP pmd was split under us: handle on pte level */
236			spin_unlock(pvmw->ptl);
237			pvmw->ptl = NULL;
238		} else if (!pmd_present(pmde)) {
239			/*
240			 * If PVMW_SYNC, take and drop THP pmd lock so that we
241			 * cannot return prematurely, while zap_huge_pmd() has
242			 * cleared *pmd but not decremented compound_mapcount().
243			 */
244			if ((pvmw->flags & PVMW_SYNC) &&
245			    transhuge_vma_suitable(vma, pvmw->address) &&
246			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
247				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
248
249				spin_unlock(ptl);
250			}
251			step_forward(pvmw, PMD_SIZE);
252			continue;
253		}
254		if (!map_pte(pvmw))
255			goto next_pte;
256this_pte:
257		if (check_pte(pvmw))
258			return true;
259next_pte:
260		do {
261			pvmw->address += PAGE_SIZE;
262			if (pvmw->address >= end)
263				return not_found(pvmw);
264			/* Did we cross page table boundary? */
265			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
266				if (pvmw->ptl) {
267					spin_unlock(pvmw->ptl);
268					pvmw->ptl = NULL;
269				}
270				pte_unmap(pvmw->pte);
271				pvmw->pte = NULL;
272				goto restart;
273			}
274			pvmw->pte++;
275			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
276				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
277				spin_lock(pvmw->ptl);
278			}
279		} while (pte_none(*pvmw->pte));
280
281		if (!pvmw->ptl) {
282			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
283			spin_lock(pvmw->ptl);
284		}
285		goto this_pte;
286	} while (pvmw->address < end);
287
288	return false;
289}
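/*
 * Example caller (a minimal sketch in the style of the mm/rmap.c users; as
 * the walk is now pfn/nr_pages based, in-tree callers typically set it up
 * with the DEFINE_FOLIO_VMA_WALK() helper from include/linux/rmap.h;
 * process_pte_mapping() and process_pmd_mapping() are hypothetical helpers):
 */
static void walk_folio_mappings(struct folio *folio, struct vm_area_struct *vma,
				unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	/* A large folio mapped by ptes can be found at several addresses. */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte)
			process_pte_mapping(&pvmw);	/* pvmw.pte valid, pvmw.ptl held */
		else
			process_pmd_mapping(&pvmw);	/* PMD-mapped, pvmw.pmd valid */
	}
}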
290
291/**
292 * page_mapped_in_vma - check whether a page is really mapped in a VMA
293 * @page: the page to test
294 * @vma: the VMA to test
295 *
296 * Returns 1 if the page is mapped into the page tables of the VMA, 0
297 * if the page is not mapped into the page tables of this VMA.  Only
298 * valid for normal file or anonymous VMAs.
299 */
300int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
301{
302	struct page_vma_mapped_walk pvmw = {
303		.pfn = page_to_pfn(page),
304		.nr_pages = 1,
305		.vma = vma,
306		.flags = PVMW_SYNC,
307	};
308
309	pvmw.address = vma_address(page, vma);
310	if (pvmw.address == -EFAULT)
311		return 0;
312	if (!page_vma_mapped_walk(&pvmw))
313		return 0;
314	page_vma_mapped_walk_done(&pvmw);
315	return 1;
316}