arch/arm/mm/context.c, v3.1
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.  We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	spin_lock_init(&mm->context.id_lock);
}

static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
	isb();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
	isb();
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	spin_unlock(&cpu_asid_lock);
}
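
The generation test used in set_mm_context() and __new_context() above works because the context ID keeps a version number in its upper bits and the hardware ASID in the low ASID_BITS. The stand-alone sketch below is not part of the kernel file; it only models that comparison, assuming ASID_BITS is 8 and ASID_FIRST_VERSION is 1 << ASID_BITS, which appears to match the ARM definitions of that era (see arch/arm/include/asm/mmu_context.h for the real ones).

/*
 * Stand-alone illustration of the ASID generation check. The ASID_BITS
 * and ASID_FIRST_VERSION values are assumptions for the example.
 */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/* Non-zero when mm_id was allocated in an older ASID generation. */
static unsigned int asid_is_stale(unsigned int mm_id)
{
	return (mm_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int mm_id = ASID_FIRST_VERSION + 5;	/* current generation */

	printf("stale: %u\n", asid_is_stale(mm_id));	/* 0: keep the ASID */

	cpu_last_asid = 2 * ASID_FIRST_VERSION + 3;	/* a rollover happened */
	printf("stale: %u\n", asid_is_stale(mm_id));	/* non-zero: reallocate */

	return 0;
}
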
arch/arm/mm/context.c, v3.5.6
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

#ifdef CONFIG_ARM_LPAE
void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}
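
In both versions, a rollover hands each CPU its own ASID in the new generation: the CPU holding cpu_asid_lock takes cpu_last_asid + smp_processor_id() + 1 in __new_context(), every other CPU takes cpu_last_asid + cpu + 1 in the reset_context() IPI, and cpu_last_asid is then advanced by NR_CPUS so later allocations cannot collide with those per-CPU values. The stand-alone sketch below is not kernel code; NR_CPUS and the starting cpu_last_asid value are invented for illustration, and only the "+ cpu + 1" and "+= NR_CPUS" steps mirror the functions above.

/*
 * Stand-alone walk-through of the rollover arithmetic. All constants
 * here are assumptions chosen for the example.
 */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)
#define NR_CPUS			4

int main(void)
{
	unsigned int cpu_last_asid = 3 * ASID_FIRST_VERSION;	/* just rolled over */
	unsigned int cpu;

	/* Say CPU 0 holds cpu_asid_lock and picks its slot in __new_context()... */
	printf("CPU 0 -> ASID %#x\n", cpu_last_asid + 0 + 1);

	/* ...and every other CPU picks its own slot in the reset_context() IPI. */
	for (cpu = 1; cpu < NR_CPUS; cpu++)
		printf("CPU %u -> ASID %#x\n", cpu, cpu_last_asid + cpu + 1);

	/* __new_context() then reserves the whole per-CPU block. */
	cpu_last_asid += NR_CPUS;
	printf("cpu_last_asid is now %#x\n", cpu_last_asid);

	return 0;
}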