v4.17

/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
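
For orientation, a small worked example of how these constants compose. ASID_BITS and ASID_MASK come from asm/mmu_context.h rather than this file, so the concrete values below are assumptions (ASID_BITS is 8 on ARMv6+); the sketch only illustrates the generation/ASID split that the code packs into mm->context.id:

	/* Hypothetical values, assuming ASID_BITS == 8:
	 *   ASID_FIRST_VERSION == 0x100, NUM_USER_ASIDS == 256
	 *   ASID_MASK          == ~0ULL << 8 (the generation bits)
	 */
	u64 ctx  = 0x3a7;		/* generation 0x300, ASID 0xa7 */
	u64 gen  = ctx & ASID_MASK;	/* 0x300: rollover epoch */
	u64 asid = ctx & ~ASID_MASK;	/* 0xa7: hardware ASID tag */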

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
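
The notifier above keeps the PID field of CONTEXTIDR in sync on every task switch: the and/orr pair preserves the low ASID bits the MMU is actively using and rewrites only the upper PID field. A minimal sketch of the same arithmetic in plain C, assuming ASID_BITS == 8 (the real value lives in asm/mmu_context.h):

	u32 contextidr = 0xa7;			/* register holds ASID 0xa7 */
	u32 pid = task_pid_nr(thread->task);	/* e.g. 1234 */

	contextidr &= ~ASID_MASK;		/* "and": keep the ASID */
	contextidr |= pid << ASID_BITS;		/* "orr": install the PID */
	/* result: (1234 << 8) | 0xa7 */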

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}
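
To make the allocator's shape concrete, here is a self-contained user-space toy of the slow path above, shrunk to a 4-bit ASID space. It is a sketch only: it keeps the bump-generation-and-flush structure but deliberately drops the reserved-ASID and cpumask bookkeeping that the real code needs for SMP correctness:

	#include <stdint.h>
	#include <stdio.h>

	#define TOY_ASID_BITS	4
	#define TOY_NUM_ASIDS	(1u << TOY_ASID_BITS)

	static uint8_t toy_map[TOY_NUM_ASIDS];		/* stand-in for asid_map */
	static uint64_t toy_generation = TOY_NUM_ASIDS;	/* ASID_FIRST_VERSION */

	static unsigned toy_find_free(unsigned from)
	{
		for (unsigned i = from; i < TOY_NUM_ASIDS; i++)
			if (!toy_map[i])
				return i;
		return TOY_NUM_ASIDS;
	}

	static uint64_t toy_new_context(void)
	{
		static unsigned cur_idx = 1;	/* ASID #0 stays reserved */
		unsigned asid = toy_find_free(cur_idx);

		if (asid == TOY_NUM_ASIDS) {	/* space exhausted: roll over */
			toy_generation += TOY_NUM_ASIDS;
			for (unsigned i = 0; i < TOY_NUM_ASIDS; i++)
				toy_map[i] = 0;	/* flush_context() analogue */
			asid = toy_find_free(1);
		}
		toy_map[asid] = 1;
		cur_idx = asid;
		return toy_generation | asid;	/* same packing as above */
	}

	int main(void)
	{
		for (int i = 0; i < 20; i++)
			printf("context.id = %#llx\n",
			       (unsigned long long)toy_new_context());
		return 0;
	}

Running it shows fifteen allocations in generation 0x10, then the generation stepping to 0x20 at rollover while the low bits restart from 1.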

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
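
The fastpath test above packs two questions into one expression: xor-ing mm->context.id with asid_generation clears the generation bits only when both values carry the same generation, so anything non-zero surviving the >> ASID_BITS shift means the mm's ASID is stale. A worked check of that arithmetic (user-space sketch, assuming ASID_BITS == 8):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const unsigned asid_bits = 8;			/* assumed ASID_BITS */
		uint64_t generation = 2ULL << asid_bits;	/* current: 0x200 */
		uint64_t asid = (1ULL << asid_bits) | 0xa3;	/* from gen 0x100 */

		/* Stale generation: high bits differ, slow path taken. */
		assert(((asid ^ generation) >> asid_bits) != 0);

		/* After new_context() re-stamps it with the current generation. */
		asid = generation | 0xa3;
		assert(((asid ^ generation) >> asid_bits) == 0);
		return 0;
	}

The second half of the fastpath condition, the atomic64_xchg() on active_asids, closes the race with a concurrent rollover: flush_context() zeroes every CPU's active_asids entry, so a CPU that lost the race sees 0 from the xchg and falls through to take cpu_asid_lock.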
v6.8

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
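
One difference from the v4.17 listing sits in this notifier: the task is now reached through thread_task(thread) rather than thread->task, reflecting the move to THREAD_INFO_IN_TASK, where thread_info is embedded in task_struct instead of pointing back at it. A reasonable mental model of the helper (the real definition lives in include/linux/thread_info.h under CONFIG_THREAD_INFO_IN_TASK) is:

	static inline struct task_struct *thread_task(struct thread_info *ti)
	{
		return container_of(ti, struct task_struct, thread_info);
	}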

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	check_vmalloc_seq(mm);

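	/*
	 * v6.8 folds v4.17's open-coded vmalloc_seq test into a helper.
	 * A plausible shape for it (an assumption; the real definition
	 * lives in asm/mmu_context.h) is:
	 *
	 *	static inline void check_vmalloc_seq(struct mm_struct *mm)
	 *	{
	 *		if (unlikely(atomic_read(&mm->context.vmalloc_seq) !=
	 *			     atomic_read(&init_mm.context.vmalloc_seq)))
	 *			__check_vmalloc_seq(mm);
	 *	}
	 */
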
	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
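
For context on how this entry point is reached: on ARM, switch_mm() in asm/mmu_context.h hands every address-space switch to check_and_switch_context(). The sketch below shows the calling shape only; it is an assumption about the surrounding glue, not a copy of the real header:

	/* simplified caller, CONFIG_MMU assumed */
	static inline void
	switch_mm(struct mm_struct *prev, struct mm_struct *next,
		  struct task_struct *tsk)
	{
		if (prev != next)
			check_and_switch_context(next, tsk);
	}

Note that cpu_switch_mm() only runs at the switch_mm_fastpath label once a live, current-generation ASID has been installed, so the hardware never observes a stale TTBR0/ASID pair.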