v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
#ifdef CONFIG_PPC_BOOK3S_32
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
		VM_WARN_ON_ONCE(next == &init_mm);
		cpumask_set_cpu(cpu, mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin loading
		 * translations for 'next' from page table PTEs into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or the load of
		 * get_user_context which loads the context for the VSID hash
		 * to insert a new SLB, in the SLB fault handler.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix, which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after the store to rq->curr,
		 * before user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);

	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(prev)));
}

#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif
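The barrier comment in switch_mm_irqs_off describes a classic store-buffering pattern: one CPU stores to mm_cpumask and then loads translations, while another clears PTEs and then loads mm_cpumask, and the paired full barriers guarantee that at least one side observes the other's store. Below is a minimal user-space sketch of that pattern using C11 atomics and pthreads; the names (cpumask_bit, pte, switch_side, unmap_side) are illustrative stand-ins rather than kernel code, and seq_cst fences play the role of smp_mb().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit;   /* "this CPU now runs the mm"        */
static atomic_int pte = 1;       /* the PTE, initially valid          */
static int tlb_load;             /* translation the switching CPU got */
static int cpumask_seen;         /* membership the unmapping CPU saw  */

/* Switching CPU: publish membership, then start loading translations. */
static void *switch_side(void *arg)
{
	(void)arg;
	/* cpumask_set_cpu(cpu, mm_cpumask(next)) ... */
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	/* ... smp_mb() ... */
	atomic_thread_fence(memory_order_seq_cst);
	/* ... first load of translations for 'next' */
	tlb_load = atomic_load_explicit(&pte, memory_order_relaxed);
	return NULL;
}

/* Unmapping CPU: clear the PTE, then check which CPUs need flushing. */
static void *unmap_side(void *arg)
{
	(void)arg;
	/* store clearing the PTE ... */
	atomic_store_explicit(&pte, 0, memory_order_relaxed);
	/* ... barrier in the flush path (pte_xchg / tlb-radix) ... */
	atomic_thread_fence(memory_order_seq_cst);
	/* ... load of mm_cpumask to pick CPUs to flush */
	cpumask_seen = atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
	return NULL;
}

int main(void)	/* build with: cc -pthread sb.c */
{
	pthread_t a, b;

	pthread_create(&a, NULL, switch_side, NULL);
	pthread_create(&b, NULL, unmap_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * With both fences, the outcome tlb_load == 1 && cpumask_seen == 0
	 * (stale translation used, CPU skipped by the flush) is impossible:
	 * at least one side must observe the other's store.
	 */
	printf("tlb_load=%d cpumask_seen=%d\n", tlb_load, cpumask_seen);
	return 0;
}

Without both fences, hardware may reorder each load ahead of its preceding store, allowing exactly the forbidden outcome the kernel comment is guarding against: a CPU using stale translations while being skipped by the TLB flush.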
v4.17
 
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the read side the barrier is in pte_xchg(), which orders
		 * the store to the PTE vs the load of mm_cpumask.
		 *
		 * This full barrier is needed by membarrier when switching
		 * between processes after the store to rq->curr, before
		 * user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);
	else
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}
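The v4.17 comment places the read-side barrier inside pte_xchg() itself rather than in a standalone fence. As a hedged user-space illustration (C11 atomics; pte and cpumask_bit are made-up stand-ins, not the kernel's types or its pte_xchg): a sequentially consistent read-modify-write already carries full-barrier semantics, so the mm_cpumask load that follows it cannot be reordered ahead of the PTE clear.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for one PTE and one mm_cpumask bit. */
static atomic_long pte = 1;      /* PTE starts out valid          */
static atomic_int  cpumask_bit;  /* set by a CPU running the mm   */

int main(void)
{
	/* Clear the PTE with an exchange. A seq_cst RMW acts as a
	 * full barrier, which is the property the kernel comment
	 * attributes to pte_xchg() on the hash MMU. */
	long old = atomic_exchange(&pte, 0);

	/* Because of that implied barrier, this load is ordered
	 * after the PTE clear: any CPU that set its cpumask bit
	 * before loading the PTE is either seen here, or it saw
	 * the cleared PTE instead. */
	int must_flush = atomic_load(&cpumask_bit);

	printf("old_pte=%ld must_flush=%d\n", old, must_flush);
	return 0;
}

Folding the barrier into the PTE update keeps the unmap path free of a separate smp_mb(), which appears to be the design point the comment is documenting.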