#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
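
/*
 * Illustrative note (editor's addition, not in the original file): the
 * sender spins on its slot's flush_cpumask in flush_tlb_others_ipi()
 * while the target CPUs clear their bits from the IPI handler.  If two
 * senders' slots shared an internode cache line, every bit-clear aimed
 * at one sender would also bounce the line the other sender is spinning
 * on.  Padding each slot to INTERNODE_CACHE_BYTES keeps the slots on
 * separate lines and avoids that false sharing.
 */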

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpumask_clear_cpu(cpu,
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 *
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop,
 * still used for documentation purposes, but the usage is slightly
 * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
 * entry calls in with the first parameter in %eax. Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
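	/*
	 * Worked example (editor's addition, not in the original file):
	 * per the comment above, if the sender used vector
	 * INVALIDATE_TLB_VECTOR_START + 3, orig_ax holds the complement
	 * ~(INVALIDATE_TLB_VECTOR_START + 3), so
	 * ~orig_ax - INVALIDATE_TLB_VECTOR_START recovers sender == 3 and
	 * we look in flush_state[3] for the flush parameters.
	 */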
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
				    INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_va = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}

static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We are changing tlb_vector_offset for each CPU at runtime, but this
	 * will not cause inconsistency, as the write is atomic on x86. We
	 * might see more lock contention for a short time, but after every
	 * CPU's tlb_vector_offset has been changed, everything should go back
	 * to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we might
	 * waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}
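
/*
 * Worked example (editor's addition, not in the original file): with
 * NUM_INVALIDATE_TLB_VECTORS == 8 and two online NUMA nodes,
 * nr_node_vecs == 4.  Node 0 (idx 0) gets node_offset 0, so its CPUs
 * cycle through vectors 0..3; node 1 (idx 1) gets node_offset 4, so its
 * CPUs cycle through vectors 4..7.  CPUs on different nodes therefore
 * never contend for the same flush_state slot.
 */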

static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
	preempt_enable();
}
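
/*
 * Note (editor's addition, not in the original file): cpumask_any_but()
 * returns a CPU number >= nr_cpu_ids when the current CPU is the only
 * one set in mm_cpumask(mm), so the test above (and the same test in
 * flush_tlb_mm()/flush_tlb_page() below) skips the remote IPI entirely
 * for an mm that has only ever run on this CPU.
 */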

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, va);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>
/*
 *	Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

			pgd_t *pgd = next->pgd + stack_pgd_index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
		}

#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 *
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	  else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());

}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
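
/*
 * Note (editor's addition, not in the original file): callers encode the
 * range in two special ways.  flush_tlb_page() below passes end == 0UL,
 * which the "end == 0" check above turns into a single-page range, while
 * flush_tlb_current_task() and flush_tlb_mm_range() pass
 * end == TLB_FLUSH_ALL to request a full TLB flush on every target CPU.
 */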

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
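
/*
 * Worked example (editor's addition, not in the original file): with the
 * default ceiling of 33 pages and roughly 100 ns per invlpg, the worst
 * case for the page-by-page path in flush_tlb_mm_range() below is about
 * 33 * 100 ns = 3,300 ns; any larger range falls back to a single full
 * local_flush_tlb() instead.
 */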

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{

	/* Balance against a user space task's flush; be a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
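
/*
 * Worked example (editor's addition, not in the original file): with the
 * default ceiling of 33 and 4 KiB pages, a kernel range larger than
 * 33 * 4096 = 135,168 bytes (about 132 KiB) takes the full-flush path on
 * every CPU; anything smaller is flushed page by page via
 * do_kernel_range_flush().
 */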

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
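
/*
 * Usage sketch (editor's addition, not in the original file), assuming
 * debugfs is mounted at /sys/kernel/debug and arch_debugfs_dir is the
 * "x86" directory there, as described in Documentation/x86/tlb.txt:
 *
 *   # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   33
 *   # echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * The write path above accepts any non-negative integer; writing 0
 * effectively forces a full flush for every ranged flush request.
 */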

#endif /* CONFIG_SMP */