/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

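/*
 * Record which known CPUs share a core with @cpu so the scheduler can
 * see SMT/VPE siblings; with a single sibling per core, each CPU is
 * simply its own sibling.
 */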
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

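/*
 * Platform code registers its SMP operations (IPI, boot and shutdown
 * hooks) here, normally once during early setup; a second call
 * overrides the first with a warning.
 */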
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
		__cpu_name[smp_processor_id()] = __cpu_name[0];
	else
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_probe();
	cpu_report();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * IRQs will be enabled in ->smp_finish(); enabling them any
	 * earlier is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * Single- and multi-target function calls share this IPI; the generic
 * handler below services both queues.
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

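/*
 * IPI callback used by smp_send_stop(): take this CPU out of the
 * online map and spin, using the wait instruction when available.
 */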
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();		/* Wait if available. */
	}
}

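/* Ask every other online CPU to park itself in stop_this_cpu(). */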
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

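/*
 * Final SMP bring-up hook, run on the boot CPU once all secondaries
 * have come online; gives the platform a last chance to finish setup.
 */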
void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

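/*
 * Boot one secondary: hand the idle task to the platform code, then
 * spin until the new CPU has checked in via cpu_callin_map.
 */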
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

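/*
 * Flush the whole TLB on every online CPU, including the caller, by
 * broadcasting local_flush_tlb_all() as an IPI.
 */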
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

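/* Argument block passed through the TLB flush IPIs below. */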
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

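/*
 * Kernel mappings are shared by all address spaces, so a kernel-range
 * flush must run on every CPU rather than relying on lazy context
 * invalidation.
 */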
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

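/*
 * Same single-user vs. shared-mm logic as flush_tlb_mm(), applied to
 * one user page.
 */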
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

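/*
 * Flush one kernel virtual address from the TLB of every CPU,
 * including the caller.
 */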
void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

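/*
 * Used by the kexec/kdump code: publish the dump callback, then IPI
 * every other online CPU so each invokes it via the SMP_DUMP action.
 */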
#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif