v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

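For context, the p?d_none_or_clear_bad() callers mentioned in the comment above live in include/asm-generic/pgtable.h rather than in this file; the pmd variant looks roughly like this (paraphrased sketch, shown here only for illustration):

/* Caller side, roughly as defined in include/asm-generic/pgtable.h. */
static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);	/* report and reset the bad entry */
		return 1;
	}
	return 0;
}
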
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

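As the comment explains, the return value tells the caller whether an MMU cache update is needed. A minimal, hypothetical caller sketch follows; the helper name example_fixup_access is invented for illustration, while ptep_set_access_flags() and update_mmu_cache() are the real interfaces:

/* Hypothetical caller sketch -- not part of this file. */
static void example_fixup_access(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty)
{
	/* Only touch the MMU cache when the PTE actually changed. */
	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, ptep);
}
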
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
			   !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

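The deposit/withdraw pair above stashes a pre-allocated pte page table under the pmd lock so a later split cannot fail on allocation. A hedged, hypothetical sketch of how a caller might pair them; the example_* helper names are invented, and only the deposit/withdraw calls and set_pmd_at() are real interfaces:

/* Hypothetical pairing sketch -- not part of this file. */
static void example_install_huge_pmd(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t entry,
				     pgtable_t pgtable)
{
	/* Stash the pre-allocated pte table for a later split. */
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	set_pmd_at(mm, addr, pmdp, entry);
}

static pgtable_t example_remove_huge_pmd(struct mm_struct *mm, pmd_t *pmdp)
{
	/* Take the stashed pte table back (FIFO order, see above). */
	return pgtable_trans_huge_withdraw(mm, pmdp);
}
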
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

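pmdp_invalidate() returns the old pmd so callers can carry hardware-set dirty/accessed bits over into the new entry. A simplified, hypothetical sketch of that pattern, loosely modelled on callers such as change_huge_pmd(); the helper name is made up for illustration:

/* Hypothetical caller sketch -- not part of this file. */
static void example_change_huge_pmd(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp,
				    pgprot_t newprot)
{
	/* Invalidate first so hardware cannot set bits we would lose. */
	pmd_t entry = pmdp_invalidate(vma, addr, pmdp);

	entry = pmd_modify(entry, newprot);
	set_pmd_at(vma->vm_mm, addr, pmdp, entry);
}
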
#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
v3.1
 
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed, and
 * writable). Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
	flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif