/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

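/*
 * register_smp_ops() hands the platform-specific SMP callbacks (CPU kick,
 * IPI send, CPU disable, ...) to this generic layer.  Platform setup code
 * calls it once during early boot; for instance, the SH-X3 support code
 * registers its own plat_smp_ops.
 */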
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

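/*
 * Seed this CPU's cpuinfo slot from the boot CPU's data and record the
 * loops_per_jiffy value the CPU has just calibrated.
 */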
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

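/*
 * Runs once on the boot CPU before any secondary is started: set up a fresh
 * MMU context for init_mm, record this CPU's number, and let the platform
 * do its own preparation through mp_ops->prepare_cpus().
 */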
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

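/*
 * Record the boot CPU in the physical<->logical maps and mark it online,
 * possible and CPU_ONLINE before the rest of SMP bring-up runs.
 */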
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

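/*
 * CPU hotplug support.  native_cpu_die() runs on the CPU tearing another
 * one down: it polls the victim's cpu_state for up to roughly one second
 * (10 x 100ms), waiting for it to report CPU_DEAD.
 */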
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

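/*
 * Executed on the CPU that is going offline: drop any mm borrowed by the
 * idle task, tear down this CPU's IRQ stacks, and advertise CPU_DEAD so
 * that native_cpu_die() on the requesting CPU can complete.  Interrupts
 * are disabled before returning.
 */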
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

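/*
 * Generic offline path: ask the platform to disable the CPU, mark it
 * !online, migrate its IRQs away, flush caches/TLBs, and drop it from
 * every task's mm cpumask.  After this the CPU must not schedule again.
 */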
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

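/*
 * Entry point for a freshly started secondary CPU (reached from head.S via
 * stack_start.start_kernel_fn): enable the MMU, borrow init_mm, set up the
 * per-CPU trap handling, calibrate the delay loop, mark the CPU online and
 * finally drop into the idle loop.
 */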
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

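/*
 * Bring one secondary CPU up: hand its stack, thread_info and entry point
 * to head.S through stack_start, kick the CPU via the platform's
 * start_cpu() hook, then busy-wait for up to one second (HZ jiffies) for
 * it to appear in the online mask.
 */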
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

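/*
 * IPI senders used by the scheduler and the generic smp_call_function code.
 * Requests are translated into SMP_MSG_* messages delivered through the
 * platform's send_ipi() hook; smp_send_stop() simply piggybacks on
 * smp_call_function().
 */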
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

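/*
 * Tick broadcast support: when a CPU's local clockevent device cannot fire
 * (e.g. in a deep idle state), the clockevents framework asks a working CPU
 * to relay ticks.  tick_broadcast() sends SMP_MSG_TIMER to each target and
 * ipi_timer() feeds the tick into tick_receive_broadcast() on the receiver.
 */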
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

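/*
 * Demultiplex an incoming IPI.  The platform's IPI interrupt handler
 * decodes the message number from its hardware and calls this with one of
 * the SMP_MSG_* values.
 */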
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

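/*
 * SMP TLB flushing.  Each flush_tlb_*() entry point below follows the same
 * pattern: a small *_ipi() helper performs the corresponding local_ flush,
 * and the caller runs it on the other CPUs via smp_call_function() or
 * on_each_cpu() while doing the local flush itself.
 */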
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

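/*
 * Argument block for the ranged and per-page flush IPIs: smp_call_function()
 * passes only a single void *, so the vma and address range are bundled into
 * one on-stack structure.
 */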
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif