v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif
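
The #if/#elif/#else ladder above is a standard kernel idiom: each configuration gets its own static inline helper, and configurations with nothing to track get an empty stub that the compiler eliminates entirely, so call sites stay free of #ifdefs. Below is a minimal standalone sketch of the same idiom; the FEATURE_TRACK_PGDIR macro and the toy struct names are made up for illustration, not taken from the kernel.

#include <stdio.h>

/* Hypothetical stand-in for a kernel CONFIG_* option. */
#define FEATURE_TRACK_PGDIR 1

struct toy_mm { void *pgd; };
struct toy_task { void *pgdir; };

#if defined(FEATURE_TRACK_PGDIR)
static inline void track_pgdir(struct toy_task *tsk, struct toy_mm *mm)
{
	/* This configuration caches the PGD pointer in the task. */
	tsk->pgdir = mm->pgd;
}
#else
/* Empty stub: calls compile to nothing, so callers need no #ifdefs. */
static inline void track_pgdir(struct toy_task *tsk, struct toy_mm *mm) { }
#endif

int main(void)
{
	struct toy_mm mm = { .pgd = &mm };
	struct toy_task tsk = { 0 };

	track_pgdir(&tsk, &mm);	/* no #ifdef needed at the call site */
	printf("pgdir tracked: %s\n", tsk.pgdir ? "yes" : "no");
	return 0;
}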

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs vs
		 * the load of mm_cpumask. And pte_xchg which does the same
		 * thing for hash.
		 *
		 * This full barrier is needed by membarrier when switching
		 * between processes after store to rq->curr, before user-space
		 * memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);
	else
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}
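
The barrier protocol described in the long comment above is the classic store-buffering pattern: the switching CPU stores its bit into mm_cpumask, executes smp_mb(), then begins loading translations; the unmapping side clears PTEs, executes its own full barrier, then loads mm_cpumask. The paired fences guarantee that at least one side observes the other's store, so a cleared PTE can never go on being used by a CPU the unmapper failed to notice. Here is a minimal user-space model of that pairing, using C11 atomics and invented names; it is an illustration of the ordering argument, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Models this CPU's bit in mm_cpumask and one page-table entry. */
static atomic_int cpu_in_mask = 0;
static atomic_int pte_valid = 1;

static int switcher_saw_valid_pte;
static int unmapper_saw_cpu;

/* Models switch_mm_irqs_off(): publish cpumask bit, barrier, load PTE. */
static void *switcher(void *arg)
{
	(void)arg;
	atomic_store_explicit(&cpu_in_mask, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	switcher_saw_valid_pte = atomic_load_explicit(&pte_valid,
						      memory_order_relaxed);
	return NULL;
}

/* Models the unmap side: clear PTE, barrier, then check mm_cpumask. */
static void *unmapper(void *arg)
{
	(void)arg;
	atomic_store_explicit(&pte_valid, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in flush path */
	unmapper_saw_cpu = atomic_load_explicit(&cpu_in_mask,
						memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, switcher, NULL);
	pthread_create(&b, NULL, unmapper, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * The fences forbid the outcome where the switcher still sees a
	 * valid PTE *and* the unmapper misses the CPU in the mask: at
	 * least one side must observe the other's store, so the unmapper
	 * either flushes this CPU or the switcher sees the cleared PTE.
	 */
	if (switcher_saw_valid_pte && !unmapper_saw_cpu)
		printf("forbidden outcome (would mean a missed TLB flush)\n");
	else
		printf("ok: at least one side observed the other\n");
	return 0;
}

With both fences in place the forbidden branch can never fire; drop either fence and the store-buffering outcome becomes reachable on weakly ordered hardware such as Power.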

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif
v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
#ifdef CONFIG_PPC_BOOK3S_32
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
		VM_WARN_ON_ONCE(next == &init_mm);
		cpumask_set_cpu(cpu, mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin loading
		 * translations for 'next' from page table PTEs into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or the load of
		 * get_user_context which loads the context for the VSID hash
		 * to insert a new SLB, in the SLB fault handler.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after store to rq->curr, before
		 * user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);

	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));
}
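
inc_mm_active_cpus(), called above when a CPU first shows up in mm_cpumask, maintains a per-mm count that powerpc later consults (via helpers such as mm_is_thread_local()) to decide whether a TLB invalidation can stay on the local CPU or must be broadcast. A rough standalone sketch of that bookkeeping idea follows, with invented names rather than the kernel's types.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-mm count of CPUs that have been active in this address space. */
struct mm_model {
	atomic_int active_cpus;
};

/* Called the first time a CPU switches to this mm. */
static void mm_gets_cpu(struct mm_model *mm)
{
	atomic_fetch_add(&mm->active_cpus, 1);
}

/* One active CPU means a local TLB invalidation suffices. */
static bool mm_is_single_cpu(struct mm_model *mm)
{
	return atomic_load(&mm->active_cpus) == 1;
}

int main(void)
{
	struct mm_model mm = { .active_cpus = 0 };

	mm_gets_cpu(&mm);	/* first CPU runs the process */
	printf("local flush ok: %d\n", mm_is_single_cpu(&mm));

	mm_gets_cpu(&mm);	/* a second CPU picks up a thread */
	printf("local flush ok: %d\n", mm_is_single_cpu(&mm));
	return 0;
}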

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif
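
arch_exit_mmap() above releases the PTE fragment the mm was still caching. On powerpc, a page of PTE tables is carved into several fragments, and the backing page is freed only when the last fragment's reference is dropped. A toy refcount sketch of that lifecycle follows, with invented names; it models the idea, not the kernel's actual pte_frag implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAGS_PER_PAGE 4	/* kernel derives this from page/table sizes */

struct frag_page {
	atomic_int refcount;
	char data[4096];
};

/* Hand out one more fragment from the page. */
static char *frag_get(struct frag_page *pg, int idx)
{
	atomic_fetch_add(&pg->refcount, 1);
	return pg->data + idx * (4096 / FRAGS_PER_PAGE);
}

/* Drop a fragment; the backing page goes away with the last reference. */
static void frag_put(struct frag_page *pg)
{
	if (atomic_fetch_sub(&pg->refcount, 1) == 1) {
		printf("last fragment dropped, freeing page\n");
		free(pg);
	}
}

int main(void)
{
	struct frag_page *pg = calloc(1, sizeof(*pg));

	atomic_init(&pg->refcount, 0);
	frag_get(pg, 0);
	frag_get(pg, 1);
	frag_put(pg);	/* page survives: one fragment still in use */
	frag_put(pg);	/* refcount hits zero, page is freed */
	return 0;
}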