// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

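/*
 * Register the platform's SMP operations.  Platform setup code is
 * expected to call this before smp_prepare_cpus() runs (e.g. a
 * board-specific plat_smp_ops instance); registering a second set of
 * ops overrides the first with a warning.
 */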
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

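/*
 * Populate cpu_data[cpu] from the boot CPU's data, then record the
 * loops_per_jiffy value that calibrate_delay() computed on this CPU.
 */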
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

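/*
 * Runs once on the boot CPU before the secondaries are brought up;
 * hands off to the platform's prepare_cpus() hook.  Without CPU
 * hotplug, every possible CPU is also marked present here.
 */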
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

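/*
 * Mark the boot CPU online and possible, and seed slot 0 of the
 * physical <-> logical CPU maps with it.
 */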
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
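/*
 * Wait for the dying CPU to reach CPU_DEAD, polling its cpu_state
 * roughly every 100ms for up to a second before giving up.
 */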
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

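/*
 * Take the current CPU down: ask the platform to disable it, mark it
 * offline, migrate its IRQs away, flush caches and TLBs, and drop it
 * from every task's mm cpumask.
 */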
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

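/*
 * C entry point for secondary CPUs, reached from the startup code in
 * head.S: adopt init_mm as the active mm, calibrate the delay loop,
 * and mark the CPU online before entering the idle loop.
 */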
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

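/*
 * Boot parameter block shared with the secondary-CPU startup code in
 * head.S; the layout here must match the assembly side.
 */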
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

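/*
 * Bring one secondary CPU up: publish its stack and entry point via
 * stack_start, kick it through the platform's start_cpu() hook, then
 * wait up to one second (HZ jiffies) for it to mark itself online.
 */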
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void arch_smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

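/*
 * Broadcast tick support: relay timer IPIs to CPUs whose local clock
 * event device is stopped, and receive them via ipi_timer().
 */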
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

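/*
 * Demultiplex an incoming IPI; called by the platform's IPI interrupt
 * handler with the decoded SMP_MSG_* type.
 */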
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

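/* Argument bundle for the cross-CPU TLB flush helpers below. */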
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif