// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

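/*
 * map_pte() maps the PTE for @pvmw->address and takes the PTE lock.
 * Unless PVMW_SYNC is set, it bails out early (without taking the
 * lock) when the PTE can not be of interest: a non-swap PTE during a
 * migration walk, or a non-present PTE otherwise.
 */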
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
	unsigned long hpage_pfn = page_to_pfn(hpage);

	/* THP can be referenced by any subpage */
	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}

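/*
 * Example (hypothetical numbers): on x86-64 with 4KiB pages, a 2MiB
 * THP spans 512 pfns, so for a THP whose head page sits at pfn 0x1000,
 * pfn_in_hpage() returns true for any pfn in [0x1000, 0x1200).
 */
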
/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page
 * for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_in_hpage(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
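	/*
	 * Walk the page tables from the top; a non-present entry at
	 * any level means the page can not be mapped at this address.
	 */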
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
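	/*
	 * A huge or migration pmd must be handled under the pmd lock;
	 * recheck the entry after taking the lock, since the THP may
	 * have been split or migrated in the meantime.
	 */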
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
226
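/*
 * Usage sketch (illustrative, not part of this file): rmap walkers
 * typically loop until the walk returns false, handling one mapping
 * of the page per iteration:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *		.flags = 0,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			...;	handle a PTE mapping; pvmw.ptl is held
 *		else
 *			...;	handle a PMD-mapped THP
 *	}
 *
 * page_vma_mapped_walk_done() is only needed when breaking out of the
 * loop early; a walk that returns false has already cleaned up.
 */
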
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
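
/*
 * Usage sketch (illustrative): page_mapped_in_vma() gives a simple
 * yes/no answer, e.g. when deciding whether a VMA is relevant for a
 * given page:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		...;	the page is mapped somewhere in this VMA
 */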