// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

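/*
 * Platform code registers its plat_smp_ops here during early setup;
 * registering a second set only warns before replacing the first.
 */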
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

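/*
 * Boot-time SMP preparation: initialise the boot CPU's MMU context and
 * thread info, then let the platform probe and prepare the secondaries.
 */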
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

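/*
 * CPU hotplug support.  The native_* helpers below are generic default
 * implementations that platform SMP ops may use for their hotplug hooks.
 */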
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

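/*
 * Entry point for secondary CPUs (reached via head.S): adopt init_mm,
 * set up per-CPU trap handling, calibrate the delay loop, mark the CPU
 * online and enter the common idle loop.
 */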
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

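/*
 * Boot parameter block shared with the secondary startup code in head.S;
 * this layout must stay in sync with the assembly side.
 */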
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

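/*
 * Bring one secondary CPU online: hand head.S the idle task's stack and
 * the start_secondary() entry point, kick the CPU through the platform
 * ops, then wait up to one second for it to mark itself online.
 */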
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif

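/*
 * Demultiplex an incoming IPI; called from the platform's IPI handler
 * with the decoded message type.
 */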
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

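/*
 * Kernel mappings are shared by all CPUs, so kernel-range flushes are
 * broadcast to every online CPU rather than filtered by mm ownership.
 */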
static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

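/*
 * Flush a single (ASID, vaddr) translation on all CPUs; unlike the
 * mm-based flushes above there is no single-user fast path here.
 */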
static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif