v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * This file contains common routines for dealing with freeing of page tables,
  4 * along with common page table handling code.
  5 *
  6 *  Derived from arch/powerpc/mm/tlb_64.c:
  7 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  8 *
  9 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 10 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 11 *    Copyright (C) 1996 Paul Mackerras
 12 *
 13 *  Derived from "arch/i386/mm/init.c"
 14 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 15 *
 16 *  Dave Engebretsen <engebret@us.ibm.com>
 17 *      Rework for PPC64 port.
 18 */
 19
 20#include <linux/kernel.h>
 21#include <linux/gfp.h>
 22#include <linux/mm.h>
 23#include <linux/percpu.h>
 24#include <linux/hardirq.h>
 25#include <linux/hugetlb.h>
 26#include <asm/tlbflush.h>
 27#include <asm/tlb.h>
 28#include <asm/hugetlb.h>
 29#include <asm/pte-walk.h>
 30
 31#ifdef CONFIG_PPC64
 32#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
 33#else
 34#define PGD_ALIGN PAGE_SIZE
 35#endif
 36
 37pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
 38
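/*
 * TRAP value 0x400 is the instruction storage interrupt vector, i.e. a fault
 * taken on an instruction fetch, so this test identifies exec faults.
 */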
 39static inline int is_exec_fault(void)
 40{
 41	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
 42}
 43
 44/* We only try to do i/d cache coherency on stuff that looks like
 45 * reasonably "normal" PTEs. We currently require a PTE to be present
 46 * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
 47 * on userspace PTEs
 48 */
 49static inline int pte_looks_normal(pte_t pte)
 50{
 51
 52	if (pte_present(pte) && !pte_special(pte)) {
 53		if (pte_ci(pte))
 54			return 0;
 55		if (pte_user(pte))
 56			return 1;
 57	}
 58	return 0;
 59}
 60
 61static struct page *maybe_pte_to_page(pte_t pte)
 62{
 63	unsigned long pfn = pte_pfn(pte);
 64	struct page *page;
 65
 66	if (unlikely(!pfn_valid(pfn)))
 67		return NULL;
 68	page = pfn_to_page(pfn);
 69	if (PageReserved(page))
 70		return NULL;
 71	return page;
 72}
 73
 74#ifdef CONFIG_PPC_BOOK3S
 75
 76/* Server-style MMU handles coherency when hashing if HW exec permission
 77 * is supported per page (currently 64-bit only). If not, then we always
 78 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 79 * support fall into the same category.
 80 */
 81
 82static pte_t set_pte_filter_hash(pte_t pte)
 83{
 84	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 85	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
 86				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
 87		struct page *pg = maybe_pte_to_page(pte);
 88		if (!pg)
 89			return pte;
 90		if (!test_bit(PG_dcache_clean, &pg->flags)) {
 91			flush_dcache_icache_page(pg);
 92			set_bit(PG_dcache_clean, &pg->flags);
 93		}
 94	}
 95	return pte;
 96}
 97
 98#else /* CONFIG_PPC_BOOK3S */
 99
100static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
101
102#endif /* CONFIG_PPC_BOOK3S */
103
104/* Embedded type MMU with HW exec support. This is a bit more complicated
105 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
106 * instead we "filter out" the exec permission for non clean pages.
107 */
108static inline pte_t set_pte_filter(pte_t pte)
109{
110	struct page *pg;
111
112	if (radix_enabled())
113		return pte;
114
115	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
116		return set_pte_filter_hash(pte);
117
118	/* No exec permission in the first place, move on */
119	if (!pte_exec(pte) || !pte_looks_normal(pte))
120		return pte;
121
122	/* If you set _PAGE_EXEC on weird pages you're on your own */
123	pg = maybe_pte_to_page(pte);
124	if (unlikely(!pg))
125		return pte;
126
127	/* If the page is clean, we move on */
128	if (test_bit(PG_dcache_clean, &pg->flags))
129		return pte;
130
131	/* If it's an exec fault, we flush the cache and make it clean */
132	if (is_exec_fault()) {
133		flush_dcache_icache_page(pg);
134		set_bit(PG_dcache_clean, &pg->flags);
135		return pte;
136	}
137
138	/* Else, we filter out _PAGE_EXEC */
139	return pte_exprotect(pte);
140}
141
142static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
143				     int dirty)
144{
145	struct page *pg;
146
147	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
148		return pte;
149
150	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
151		return pte;
152
153	/* So here, we only care about exec faults, as we use them
154	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
155	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
156	 * we just bail out
157	 */
158	if (dirty || pte_exec(pte) || !is_exec_fault())
159		return pte;
160
161#ifdef CONFIG_DEBUG_VM
162	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
163	 * an error we would have bailed out earlier in do_page_fault()
164	 * but let's make sure of it
165	 */
166	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
167		return pte;
168#endif /* CONFIG_DEBUG_VM */
169
170	/* If you set _PAGE_EXEC on weird pages you're on your own */
171	pg = maybe_pte_to_page(pte);
172	if (unlikely(!pg))
173		goto bail;
174
175	/* If the page is already clean, we move on */
176	if (test_bit(PG_dcache_clean, &pg->flags))
177		goto bail;
178
179	/* Clean the page and set PG_dcache_clean */
180	flush_dcache_icache_page(pg);
181	set_bit(PG_dcache_clean, &pg->flags);
182
183 bail:
184	return pte_mkexec(pte);
185}
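/*
 * Putting the two filters together: on MMUs with HW exec permission but no
 * hash table, set_pte_filter() strips _PAGE_EXEC from PTEs whose page is not
 * yet I$/D$ clean, and set_access_flags_filter() hands _PAGE_EXEC back (after
 * flushing the caches) the first time the page actually takes an exec fault.
 */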
186
187/*
188 * set_pte stores a linux PTE into the linux page table.
189 */
190void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
191		pte_t pte)
192{
193	/*
194	 * Make sure hardware valid bit is not set. We don't do
195	 * tlb flush for this update.
196	 */
197	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
198
199	/* Note: mm->context.id might not yet have been assigned as
200	 * this context might not have been activated yet when this
201	 * is called.
202	 */
203	pte = set_pte_filter(pte);
204
205	/* Perform the setting of the PTE */
206	__set_pte_at(mm, addr, ptep, pte, 0);
207}
208
209void unmap_kernel_page(unsigned long va)
210{
211	pmd_t *pmdp = pmd_off_k(va);
212	pte_t *ptep = pte_offset_kernel(pmdp, va);
213
214	pte_clear(&init_mm, va, ptep);
215	flush_tlb_kernel_range(va, va + PAGE_SIZE);
216}
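/*
 * Illustrative counterpart to unmap_kernel_page() above (a sketch, not part
 * of this file): it assumes the page table backing 'va' is already populated
 * so that pte_offset_kernel() finds a valid PTE slot; the helper name is
 * invented for the example.
 */
static void __maybe_unused example_map_kernel_page(unsigned long va,
						   phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	/* set_pte_at() runs the filters above and stores the new translation */
	set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
}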
217
218/*
219 * This is called when relaxing access to a PTE. It's also called in the page
220 * fault path when we don't hit any of the major fault cases, i.e. a minor
221 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
222 * handled those two for us; we additionally deal with missing execute
223 * permission here on some processors.
224 */
225int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
226			  pte_t *ptep, pte_t entry, int dirty)
227{
228	int changed;
229	entry = set_access_flags_filter(entry, vma, dirty);
230	changed = !pte_same(*(ptep), entry);
231	if (changed) {
232		assert_pte_locked(vma->vm_mm, address);
233		__ptep_set_access_flags(vma, ptep, entry,
234					address, mmu_virtual_psize);
235	}
236	return changed;
237}
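/*
 * Typical caller, sketched from the generic fault path (mm/memory.c): the
 * core mm relaxes the PTE and only updates the MMU when something actually
 * changed. Roughly:
 *
 *	entry = pte_mkyoung(vmf->orig_pte);
 *	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 0))
 *		update_mmu_cache(vma, vmf->address, vmf->pte);
 */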
238
239#ifdef CONFIG_HUGETLB_PAGE
240int huge_ptep_set_access_flags(struct vm_area_struct *vma,
241			       unsigned long addr, pte_t *ptep,
242			       pte_t pte, int dirty)
243{
244#ifdef HUGETLB_NEED_PRELOAD
245	/*
246	 * The "return 1" forces a call of update_mmu_cache, which will write a
247	 * TLB entry.  Without this, platforms that don't do a write of the TLB
248	 * entry in the TLB miss handler asm will fault ad infinitum.
249	 */
250	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
251	return 1;
252#else
253	int changed, psize;
254
255	pte = set_access_flags_filter(pte, vma, dirty);
256	changed = !pte_same(*(ptep), pte);
257	if (changed) {
258
259#ifdef CONFIG_PPC_BOOK3S_64
260		struct hstate *h = hstate_vma(vma);
261
262		psize = hstate_get_psize(h);
263#ifdef CONFIG_DEBUG_VM
264		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
265#endif
266
267#else
268		/*
269		 * Not used on non book3s64 platforms.
270		 * 8xx compares it with mmu_virtual_psize to
271		 * know if it is a huge page or not.
272		 */
273		psize = MMU_PAGE_COUNT;
274#endif
275		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
276	}
277	return changed;
278#endif
279}
280
281#if defined(CONFIG_PPC_8xx)
282void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
283{
284	pmd_t *pmd = pmd_off(mm, addr);
285	pte_basic_t val;
286	pte_basic_t *entry = (pte_basic_t *)ptep;
287	int num, i;
288
289	/*
290	 * Make sure hardware valid bit is not set. We don't do
291	 * tlb flush for this update.
292	 */
293	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
294
295	pte = set_pte_filter(pte);
296
297	val = pte_val(pte);
298
299	num = number_of_cells_per_pte(pmd, val, 1);
300
301	for (i = 0; i < num; i++, entry++, val += SZ_4K)
302		*entry = val;
303}
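/*
 * Descriptive note: on 8xx a huge mapping is stored as a run of ordinary 4K
 * PTE cells. number_of_cells_per_pte() says how many cells back this mapping,
 * and the loop above replicates the PTE into each cell while advancing the
 * encoded physical address by SZ_4K per cell.
 */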
304#endif
305#endif /* CONFIG_HUGETLB_PAGE */
306
307#ifdef CONFIG_DEBUG_VM
308void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
309{
310	pgd_t *pgd;
311	p4d_t *p4d;
312	pud_t *pud;
313	pmd_t *pmd;
314
315	if (mm == &init_mm)
316		return;
317	pgd = mm->pgd + pgd_index(addr);
318	BUG_ON(pgd_none(*pgd));
319	p4d = p4d_offset(pgd, addr);
320	BUG_ON(p4d_none(*p4d));
321	pud = pud_offset(p4d, addr);
322	BUG_ON(pud_none(*pud));
323	pmd = pmd_offset(pud, addr);
324	/*
325	 * For khugepaged to collapse normal pages to a hugepage, it first sets
326	 * the pmd to none to force page fault/gup to take mmap_lock. After the
327	 * pmd is set to none, we do a pte_clear which runs this assertion,
328	 * so if we find the pmd none, just return.
329	 */
330	if (pmd_none(*pmd))
331		return;
332	BUG_ON(!pmd_present(*pmd));
333	assert_spin_locked(pte_lockptr(mm, pmd));
334}
335#endif /* CONFIG_DEBUG_VM */
336
337unsigned long vmalloc_to_phys(void *va)
338{
339	unsigned long pfn = vmalloc_to_pfn(va);
340
341	BUG_ON(!pfn);
342	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
343}
344EXPORT_SYMBOL_GPL(vmalloc_to_phys);
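/*
 * Usage sketch (variable names invented): drivers that must hand the real
 * address of a vmalloc'ed buffer to hardware or firmware can do:
 *
 *	unsigned long pa = vmalloc_to_phys(ring_buf + offset);
 *
 * The BUG_ON above requires the address to be a mapped vmalloc address.
 */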
345
346/*
347 * We have 4 cases for pgds and pmds:
348 * (1) invalid (all zeroes)
349 * (2) pointer to next table, as normal; bottom 6 bits == 0
350 * (3) leaf pte for huge page _PAGE_PTE set
351 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
352 *
353 * So long as we atomically load page table pointers we are safe against teardown,
354 * we can follow the address down to the page and take a ref on it.
355 * This function needs to be called with interrupts disabled. We use this variant
356 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED.
357 */
358pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
359			bool *is_thp, unsigned *hpage_shift)
360{
361	pgd_t *pgdp;
362	p4d_t p4d, *p4dp;
363	pud_t pud, *pudp;
364	pmd_t pmd, *pmdp;
365	pte_t *ret_pte;
366	hugepd_t *hpdp = NULL;
367	unsigned pdshift;
368
369	if (hpage_shift)
370		*hpage_shift = 0;
371
372	if (is_thp)
373		*is_thp = false;
374
375	/*
376	 * Always operate on the local stack value. This makes sure the
377	 * value doesn't get updated by a parallel THP split/collapse,
378	 * page fault or page unmap. The returned pte_t * is still not
379	 * stable, so the caller should recheck it for the above conditions.
380	 * Top level is an exception because it is folded into p4d.
381	 */
382	pgdp = pgdir + pgd_index(ea);
383	p4dp = p4d_offset(pgdp, ea);
384	p4d  = READ_ONCE(*p4dp);
385	pdshift = P4D_SHIFT;
386
387	if (p4d_none(p4d))
388		return NULL;
389
390	if (p4d_is_leaf(p4d)) {
391		ret_pte = (pte_t *)p4dp;
392		goto out;
393	}
394
395	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
396		hpdp = (hugepd_t *)&p4d;
397		goto out_huge;
398	}
399
400	/*
401	 * Even if we end up with an unmap, the pgtable will not
402	 * be freed, because we do an rcu free and here we are
403	 * irq disabled
404	 */
405	pdshift = PUD_SHIFT;
406	pudp = pud_offset(&p4d, ea);
407	pud  = READ_ONCE(*pudp);
408
409	if (pud_none(pud))
410		return NULL;
411
412	if (pud_is_leaf(pud)) {
413		ret_pte = (pte_t *)pudp;
414		goto out;
415	}
416
417	if (is_hugepd(__hugepd(pud_val(pud)))) {
418		hpdp = (hugepd_t *)&pud;
419		goto out_huge;
420	}
421
422	pdshift = PMD_SHIFT;
423	pmdp = pmd_offset(&pud, ea);
424	pmd  = READ_ONCE(*pmdp);
425
426	/*
427	 * A hugepage collapse is captured by this condition, see
428	 * pmdp_collapse_flush.
429	 */
430	if (pmd_none(pmd))
431		return NULL;
432
433#ifdef CONFIG_PPC_BOOK3S_64
434	/*
435	 * A hugepage split is captured by this condition, see
436	 * pmdp_invalidate.
437	 *
438	 * Huge page modification can be caught here too.
439	 */
440	if (pmd_is_serializing(pmd))
441		return NULL;
442#endif
443
444	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
445		if (is_thp)
446			*is_thp = true;
447		ret_pte = (pte_t *)pmdp;
448		goto out;
449	}
450
451	if (pmd_is_leaf(pmd)) {
452		ret_pte = (pte_t *)pmdp;
453		goto out;
454	}
455
456	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
457		hpdp = (hugepd_t *)&pmd;
458		goto out_huge;
459	}
460
461	return pte_offset_kernel(&pmd, ea);
462
463out_huge:
464	if (!hpdp)
465		return NULL;
466
467	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
468	pdshift = hugepd_shift(*hpdp);
469out:
470	if (hpage_shift)
471		*hpage_shift = pdshift;
472	return ret_pte;
473}
474EXPORT_SYMBOL_GPL(__find_linux_pte);
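/*
 * Caller sketch (illustrative only, helper name invented): __find_linux_pte()
 * is only safe with interrupts disabled, since that is what keeps the
 * RCU-freed page tables from disappearing under the walk.
 */
static unsigned long __maybe_unused example_ea_to_pfn(struct mm_struct *mm,
						      unsigned long ea)
{
	unsigned long flags, pfn = 0;
	unsigned int shift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);
	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
	if (ptep && pte_present(*ptep))
		pfn = pte_pfn(*ptep);
	local_irq_restore(flags);

	return pfn;
}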
475
476/* Note due to the way vm flags are laid out, the bits are XWR */
477const pgprot_t protection_map[16] = {
478	[VM_NONE]					= PAGE_NONE,
479	[VM_READ]					= PAGE_READONLY,
480	[VM_WRITE]					= PAGE_COPY,
481	[VM_WRITE | VM_READ]				= PAGE_COPY,
482	[VM_EXEC]					= PAGE_READONLY_X,
483	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
484	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
485	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
486	[VM_SHARED]					= PAGE_NONE,
487	[VM_SHARED | VM_READ]				= PAGE_READONLY,
488	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
489	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
490	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
491	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
492	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
493	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
494};
495
496#ifndef CONFIG_PPC_BOOK3S_64
497DECLARE_VM_GET_PAGE_PROT
498#endif
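/*
 * DECLARE_VM_GET_PAGE_PROT (include/linux/mm.h) provides the generic
 * vm_get_page_prot(), essentially:
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 *	EXPORT_SYMBOL(vm_get_page_prot);
 *
 * Book3S-64 is excluded because it supplies its own vm_get_page_prot().
 */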
v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * This file contains common routines for dealing with freeing of page tables,
  4 * along with common page table handling code.
  5 *
  6 *  Derived from arch/powerpc/mm/tlb_64.c:
  7 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  8 *
  9 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 10 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 11 *    Copyright (C) 1996 Paul Mackerras
 12 *
 13 *  Derived from "arch/i386/mm/init.c"
 14 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 15 *
 16 *  Dave Engebretsen <engebret@us.ibm.com>
 17 *      Rework for PPC64 port.
 18 */
 19
 20#include <linux/kernel.h>
 21#include <linux/gfp.h>
 22#include <linux/mm.h>
 23#include <linux/percpu.h>
 24#include <linux/hardirq.h>
 25#include <linux/hugetlb.h>
 26#include <asm/tlbflush.h>
 27#include <asm/tlb.h>
 28#include <asm/hugetlb.h>
 29
 30static inline int is_exec_fault(void)
 31{
 32	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
 33}
 34
 35/* We only try to do i/d cache coherency on stuff that looks like
 36 * reasonably "normal" PTEs. We currently require a PTE to be present
 37 * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
 38 * on userspace PTEs
 39 */
 40static inline int pte_looks_normal(pte_t pte)
 41{
 42
 43	if (pte_present(pte) && !pte_special(pte)) {
 44		if (pte_ci(pte))
 45			return 0;
 46		if (pte_user(pte))
 47			return 1;
 48	}
 49	return 0;
 50}
 51
 52static struct page *maybe_pte_to_page(pte_t pte)
 53{
 54	unsigned long pfn = pte_pfn(pte);
 55	struct page *page;
 56
 57	if (unlikely(!pfn_valid(pfn)))
 58		return NULL;
 59	page = pfn_to_page(pfn);
 60	if (PageReserved(page))
 61		return NULL;
 62	return page;
 63}
 64
 65#ifdef CONFIG_PPC_BOOK3S
 66
 67/* Server-style MMU handles coherency when hashing if HW exec permission
 68 * is supported per page (currently 64-bit only). If not, then we always
 69 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 70 * support fall into the same category.
 71 */
 72
 73static pte_t set_pte_filter_hash(pte_t pte)
 74{
 75	if (radix_enabled())
 76		return pte;
 77
 78	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 79	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
 80				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
 81		struct page *pg = maybe_pte_to_page(pte);
 82		if (!pg)
 83			return pte;
 84		if (!test_bit(PG_arch_1, &pg->flags)) {
 85			flush_dcache_icache_page(pg);
 86			set_bit(PG_arch_1, &pg->flags);
 87		}
 88	}
 89	return pte;
 90}
 91
 92#else /* CONFIG_PPC_BOOK3S */
 93
 94static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
 95
 96#endif /* CONFIG_PPC_BOOK3S */
 97
 98/* Embedded type MMU with HW exec support. This is a bit more complicated
 99 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
100 * instead we "filter out" the exec permission for non clean pages.
101 */
102static inline pte_t set_pte_filter(pte_t pte)
103{
104	struct page *pg;
105
106	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
107		return set_pte_filter_hash(pte);
108
109	/* No exec permission in the first place, move on */
110	if (!pte_exec(pte) || !pte_looks_normal(pte))
111		return pte;
112
113	/* If you set _PAGE_EXEC on weird pages you're on your own */
114	pg = maybe_pte_to_page(pte);
115	if (unlikely(!pg))
116		return pte;
117
118	/* If the page is clean, we move on */
119	if (test_bit(PG_arch_1, &pg->flags))
120		return pte;
121
122	/* If it's an exec fault, we flush the cache and make it clean */
123	if (is_exec_fault()) {
124		flush_dcache_icache_page(pg);
125		set_bit(PG_arch_1, &pg->flags);
126		return pte;
127	}
128
129	/* Else, we filter out _PAGE_EXEC */
130	return pte_exprotect(pte);
131}
132
133static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
134				     int dirty)
135{
136	struct page *pg;
137
138	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
139		return pte;
140
141	/* So here, we only care about exec faults, as we use them
142	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
143	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
144	 * we just bail out
145	 */
146	if (dirty || pte_exec(pte) || !is_exec_fault())
147		return pte;
148
149#ifdef CONFIG_DEBUG_VM
150	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
151	 * an error we would have bailed out earlier in do_page_fault()
152	 * but let's make sure of it
153	 */
154	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
155		return pte;
156#endif /* CONFIG_DEBUG_VM */
157
158	/* If you set _PAGE_EXEC on weird pages you're on your own */
159	pg = maybe_pte_to_page(pte);
160	if (unlikely(!pg))
161		goto bail;
162
163	/* If the page is already clean, we move on */
164	if (test_bit(PG_arch_1, &pg->flags))
165		goto bail;
166
167	/* Clean the page and set PG_arch_1 */
168	flush_dcache_icache_page(pg);
169	set_bit(PG_arch_1, &pg->flags);
170
171 bail:
172	return pte_mkexec(pte);
173}
174
175/*
176 * set_pte stores a linux PTE into the linux page table.
177 */
178void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
179		pte_t pte)
180{
181	/*
182	 * Make sure hardware valid bit is not set. We don't do
183	 * tlb flush for this update.
184	 */
185	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
186
187	/* Add the pte bit when trying to set a pte */
188	pte = pte_mkpte(pte);
189
190	/* Note: mm->context.id might not yet have been assigned as
191	 * this context might not have been activated yet when this
192	 * is called.
193	 */
194	pte = set_pte_filter(pte);
195
196	/* Perform the setting of the PTE */
197	__set_pte_at(mm, addr, ptep, pte, 0);
198}
199
200/*
201 * This is called when relaxing access to a PTE. It's also called in the page
202 * fault path when we don't hit any of the major fault cases, ie, a minor
203 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
204 * handled those two for us, we additionally deal with missing execute
205 * permission here on some processors
206 */
207int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
208			  pte_t *ptep, pte_t entry, int dirty)
209{
210	int changed;
211	entry = set_access_flags_filter(entry, vma, dirty);
212	changed = !pte_same(*(ptep), entry);
213	if (changed) {
214		assert_pte_locked(vma->vm_mm, address);
215		__ptep_set_access_flags(vma, ptep, entry,
216					address, mmu_virtual_psize);
217	}
218	return changed;
219}
220
221#ifdef CONFIG_HUGETLB_PAGE
222int huge_ptep_set_access_flags(struct vm_area_struct *vma,
223			       unsigned long addr, pte_t *ptep,
224			       pte_t pte, int dirty)
225{
226#ifdef HUGETLB_NEED_PRELOAD
227	/*
228	 * The "return 1" forces a call of update_mmu_cache, which will write a
229	 * TLB entry.  Without this, platforms that don't do a write of the TLB
230	 * entry in the TLB miss handler asm will fault ad infinitum.
231	 */
232	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
233	return 1;
234#else
235	int changed, psize;
236
237	pte = set_access_flags_filter(pte, vma, dirty);
238	changed = !pte_same(*(ptep), pte);
239	if (changed) {
240
241#ifdef CONFIG_PPC_BOOK3S_64
242		struct hstate *h = hstate_vma(vma);
243
244		psize = hstate_get_psize(h);
245#ifdef CONFIG_DEBUG_VM
246		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
247#endif
248
249#else
250		/*
251		 * Not used on non book3s64 platforms.
252		 * 8xx compares it with mmu_virtual_psize to
253		 * know if it is a huge page or not.
254		 */
255		psize = MMU_PAGE_COUNT;
256#endif
257		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
258	}
259	return changed;
260#endif
261}
262
263#if defined(CONFIG_PPC_8xx)
264void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
265{
266	pmd_t *pmd = pmd_off(mm, addr);
267	pte_basic_t val;
268	pte_basic_t *entry = &ptep->pte;
269	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
270	int i;
271
272	/*
273	 * Make sure hardware valid bit is not set. We don't do
274	 * tlb flush for this update.
275	 */
276	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
277
278	pte = pte_mkpte(pte);
279
280	pte = set_pte_filter(pte);
281
282	val = pte_val(pte);
283	for (i = 0; i < num; i++, entry++, val += SZ_4K)
284		*entry = val;
285}
286#endif
287#endif /* CONFIG_HUGETLB_PAGE */
288
289#ifdef CONFIG_DEBUG_VM
290void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
291{
292	pgd_t *pgd;
293	p4d_t *p4d;
294	pud_t *pud;
295	pmd_t *pmd;
296
297	if (mm == &init_mm)
298		return;
299	pgd = mm->pgd + pgd_index(addr);
300	BUG_ON(pgd_none(*pgd));
301	p4d = p4d_offset(pgd, addr);
302	BUG_ON(p4d_none(*p4d));
303	pud = pud_offset(p4d, addr);
304	BUG_ON(pud_none(*pud));
305	pmd = pmd_offset(pud, addr);
306	/*
307	 * For khugepaged to collapse normal pages to a hugepage, it first sets
308	 * the pmd to none to force page fault/gup to take mmap_lock. After the
309	 * pmd is set to none, we do a pte_clear which runs this assertion,
310	 * so if we find the pmd none, just return.
311	 */
312	if (pmd_none(*pmd))
313		return;
314	BUG_ON(!pmd_present(*pmd));
315	assert_spin_locked(pte_lockptr(mm, pmd));
316}
317#endif /* CONFIG_DEBUG_VM */
318
319unsigned long vmalloc_to_phys(void *va)
320{
321	unsigned long pfn = vmalloc_to_pfn(va);
322
323	BUG_ON(!pfn);
324	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
325}
326EXPORT_SYMBOL_GPL(vmalloc_to_phys);
327
328/*
329 * We have 4 cases for pgds and pmds:
330 * (1) invalid (all zeroes)
331 * (2) pointer to next table, as normal; bottom 6 bits == 0
332 * (3) leaf pte for huge page _PAGE_PTE set
333 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
334 *
335 * So long as we atomically load page table pointers we are safe against teardown,
336 * we can follow the address down to the page and take a ref on it.
337 * This function needs to be called with interrupts disabled. We use this variant
338 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED.
339 */
340pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
341			bool *is_thp, unsigned *hpage_shift)
342{
343	pgd_t *pgdp;
344	p4d_t p4d, *p4dp;
345	pud_t pud, *pudp;
346	pmd_t pmd, *pmdp;
347	pte_t *ret_pte;
348	hugepd_t *hpdp = NULL;
349	unsigned pdshift;
350
351	if (hpage_shift)
352		*hpage_shift = 0;
353
354	if (is_thp)
355		*is_thp = false;
356
357	/*
358	 * Always operate on the local stack value. This makes sure the
359	 * value doesn't get updated by a parallel THP split/collapse,
360	 * page fault or page unmap. The returned pte_t * is still not
361	 * stable, so the caller should recheck it for the above conditions.
362	 * Top level is an exception because it is folded into p4d.
363	 */
364	pgdp = pgdir + pgd_index(ea);
365	p4dp = p4d_offset(pgdp, ea);
366	p4d  = READ_ONCE(*p4dp);
367	pdshift = P4D_SHIFT;
368
369	if (p4d_none(p4d))
370		return NULL;
371
372	if (p4d_is_leaf(p4d)) {
373		ret_pte = (pte_t *)p4dp;
374		goto out;
375	}
376
377	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
378		hpdp = (hugepd_t *)&p4d;
379		goto out_huge;
380	}
381
382	/*
383	 * Even if we end up with an unmap, the pgtable will not
384	 * be freed, because we do an rcu free and here we are
385	 * irq disabled
386	 */
387	pdshift = PUD_SHIFT;
388	pudp = pud_offset(&p4d, ea);
389	pud  = READ_ONCE(*pudp);
390
391	if (pud_none(pud))
392		return NULL;
393
394	if (pud_is_leaf(pud)) {
395		ret_pte = (pte_t *)pudp;
396		goto out;
397	}
398
399	if (is_hugepd(__hugepd(pud_val(pud)))) {
400		hpdp = (hugepd_t *)&pud;
401		goto out_huge;
402	}
403
404	pdshift = PMD_SHIFT;
405	pmdp = pmd_offset(&pud, ea);
406	pmd  = READ_ONCE(*pmdp);
407
408	/*
409	 * A hugepage collapse is captured by this condition, see
410	 * pmdp_collapse_flush.
411	 */
412	if (pmd_none(pmd))
413		return NULL;
414
415#ifdef CONFIG_PPC_BOOK3S_64
416	/*
417	 * A hugepage split is captured by this condition, see
418	 * pmdp_invalidate.
419	 *
420	 * Huge page modification can be caught here too.
421	 */
422	if (pmd_is_serializing(pmd))
423		return NULL;
424#endif
425
426	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
427		if (is_thp)
428			*is_thp = true;
429		ret_pte = (pte_t *)pmdp;
430		goto out;
431	}
432
433	if (pmd_is_leaf(pmd)) {
434		ret_pte = (pte_t *)pmdp;
435		goto out;
436	}
437
438	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
439		hpdp = (hugepd_t *)&pmd;
440		goto out_huge;
441	}
442
443	return pte_offset_kernel(&pmd, ea);
444
445out_huge:
446	if (!hpdp)
447		return NULL;
448
449	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
450	pdshift = hugepd_shift(*hpdp);
451out:
452	if (hpage_shift)
453		*hpage_shift = pdshift;
454	return ret_pte;
455}
456EXPORT_SYMBOL_GPL(__find_linux_pte);