// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

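/*
 * Record this CPU's cpuinfo: copy the boot CPU's data and then override
 * loops_per_jiffy with the value just calibrated on this CPU.
 */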
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

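/*
 * Called once on the boot CPU before any secondaries are brought up:
 * set up an MMU context for init_mm, record our CPU number, and let the
 * platform's mp_ops do any hardware-specific preparation.  Without CPU
 * hotplug, every possible CPU is also marked present here.
 */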
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

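/*
 * CPU hotplug teardown: native_cpu_die() waits for the dying CPU to
 * advertise CPU_DEAD through cpu_state, polling up to ten times with
 * 100ms sleeps (roughly one second).  The dying CPU itself sets
 * CPU_DEAD in play_dead_common() below.
 */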
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

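/*
 * C entry point for a secondary CPU, reached from the startup code in
 * head.S (see stack_start below).  The CPU takes references on init_mm,
 * sets up its traps and TLB, calibrates its delay loop, marks itself
 * online and then drops into the common idle loop.
 */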
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

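/*
 * Bring up one secondary CPU: hand it the idle task's stack and thread
 * info through stack_start (consumed by head.S), write the range back to
 * memory so the secondary sees the new values, kick it off through
 * mp_ops->start_cpu() at _stext, then poll for up to HZ jiffies (about
 * one second) for it to mark itself online.
 */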
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

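/*
 * Summarise the bring-up.  BogoMIPS per CPU is loops_per_jiffy * HZ / 500000,
 * so dividing the summed loops_per_jiffy by (500000/HZ) gives the integer
 * part, and the second division yields the two fractional digits.
 */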
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif

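/*
 * Demultiplex an incoming IPI.  The platform's IPI interrupt handler is
 * expected to call this with one of the SMP_MSG_* types that the senders
 * above raise via mp_ops->send_ipi().
 */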
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

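/*
 * Cross-CPU TLB flushing.  The local_flush_tlb_* primitives only affect
 * the current CPU, so the global variants below either send IPIs to the
 * other CPUs or, for single-user address spaces, invalidate their MMU
 * contexts so a fresh one is allocated at the next switch_mm().
 */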
#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

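/*
 * smp_call_function() passes a single void * to the remote handlers, so
 * the ranged and per-page flushes bundle their vma and address arguments
 * in a flush_tlb_data on the caller's stack.
 */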
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif