v6.2: arch/powerpc/mm/mmu_context.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
#ifdef CONFIG_PPC_BOOK3S_32
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin loading
		 * translations for 'next' from page table PTEs into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or the load of
		 * get_user_context which loads the context for the VSID hash
		 * to insert a new SLB, in the SLB fault handler.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after store to rq->curr, before
		 * user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif
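The smp_mb() comment above describes a classic store-buffering pairing: the CPU switching 'next' in stores its bit into mm_cpumask and then loads PTEs, while a CPU tearing a mapping down clears the PTE and then loads mm_cpumask. Below is a minimal stand-alone sketch of that pairing using C11 atomics in place of the kernel barriers; the names cpumask_bit and pte are hypothetical stand-ins rather than kernel symbols, so treat it as an illustration of the ordering argument, not the kernel's actual code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit;	/* stand-in for this CPU's mm_cpumask bit */
static atomic_int pte = 1;	/* stand-in for a PTE; mapping initially present */

/* Side A, the switch_mm_irqs_off() path: publish the bit, then read PTEs. */
static void *switcher(void *unused)
{
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	return (void *)(long)atomic_load_explicit(&pte, memory_order_relaxed);
}

/* Side B, the unmap path: clear the PTE, then read the cpumask. */
static void *unmapper(void *unused)
{
	atomic_store_explicit(&pte, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pte_xchg / tlb-radix barrier */
	return (void *)(long)atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
}

int main(void)
{
	pthread_t a, b;
	void *pte_seen, *bit_seen;

	pthread_create(&a, NULL, switcher, NULL);
	pthread_create(&b, NULL, unmapper, NULL);
	pthread_join(a, &pte_seen);
	pthread_join(b, &bit_seen);

	/*
	 * With both fences, the outcome pte_seen == 1 && bit_seen == 0 is
	 * forbidden: either the unmapper sees the bit (and would flush that
	 * CPU's TLB), or the switcher already sees the cleared PTE.
	 */
	printf("switcher saw pte=%ld, unmapper saw bit=%ld\n",
	       (long)pte_seen, (long)bit_seen);
	return 0;
}

Removing either fence re-allows the bad outcome on weakly ordered hardware such as Power, which is exactly why both sides of the kernel pairing need their barrier.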
v5.4: arch/powerpc/mm/mmu_context.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs vs
		 * the load of mm_cpumask. And pte_xchg which does the same
		 * thing for hash.
		 *
		 * This full barrier is needed by membarrier when switching
		 * between processes after store to rq->curr, before user-space
		 * memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);
	else
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif
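Between v5.4 and v6.2 the function body is nearly unchanged: the "dssall" string became the PPC_DSSALL macro, the radix_kvm_prefetch_workaround() call for newly arrived CPUs is gone, and switch_mm_pgdir() grew Book3S-32 and KUAP bookkeeping. What both versions keep is the full barrier that membarrier relies on, and that guarantee is observable from user space. The sketch below exercises it through membarrier(2), available since Linux 4.14 with CONFIG_MEMBARRIER; error handling is deliberately minimal, and this is an illustrative sketch rather than kernel test code.

#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper: glibc does not ship a membarrier() wrapper. */
static int membarrier(int cmd, unsigned int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* A process must opt in before using the private expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) < 0) {
		perror("membarrier register");
		return 1;
	}

	/*
	 * Executes a full memory barrier on every CPU currently running a
	 * thread of this process. The smp_mb() in switch_mm_irqs_off() is
	 * part of what keeps this correct across concurrent context
	 * switches, as the comments in both versions note.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) < 0) {
		perror("membarrier");
		return 1;
	}

	puts("expedited membarrier completed");
	return 0;
}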