  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * arch/sh/kernel/smp.c
  4 *
  5 * SMP support for the SuperH processors.
  6 *
  7 * Copyright (C) 2002 - 2010 Paul Mundt
  8 * Copyright (C) 2006 - 2007 Akio Idehara
  9 */
 10#include <linux/err.h>
 11#include <linux/cache.h>
 12#include <linux/cpumask.h>
 13#include <linux/delay.h>
 14#include <linux/init.h>
 15#include <linux/spinlock.h>
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/cpu.h>
 19#include <linux/interrupt.h>
 20#include <linux/sched/mm.h>
 21#include <linux/sched/hotplug.h>
 22#include <linux/atomic.h>
 23#include <linux/clockchips.h>
 24#include <asm/processor.h>
 25#include <asm/mmu_context.h>
 26#include <asm/smp.h>
 27#include <asm/cacheflush.h>
 28#include <asm/sections.h>
 29#include <asm/setup.h>
 30
 31int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 32int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 33
 34struct plat_smp_ops *mp_ops = NULL;
 35
 36/* State of each CPU */
 37DEFINE_PER_CPU(int, cpu_state) = { 0 };
 38
 39void register_smp_ops(struct plat_smp_ops *ops)
 40{
 41	if (mp_ops)
 42		printk(KERN_WARNING "Overriding previously set SMP ops\n");
 43
 44	mp_ops = ops;
 45}
 46
 47static inline void smp_store_cpu_info(unsigned int cpu)
 48{
 49	struct sh_cpuinfo *c = cpu_data + cpu;
 50
 51	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
 52
 53	c->loops_per_jiffy = loops_per_jiffy;
 54}
 55
 56void __init smp_prepare_cpus(unsigned int max_cpus)
 57{
 58	unsigned int cpu = smp_processor_id();
 59
 60	init_new_context(current, &init_mm);
 61	current_thread_info()->cpu = cpu;
 62	mp_ops->prepare_cpus(max_cpus);
 63
 64#ifndef CONFIG_HOTPLUG_CPU
 65	init_cpu_present(cpu_possible_mask);
 66#endif
 67}
 68
 69void __init smp_prepare_boot_cpu(void)
 70{
 71	unsigned int cpu = smp_processor_id();
 72
 73	__cpu_number_map[0] = cpu;
 74	__cpu_logical_map[0] = cpu;
 75
 76	set_cpu_online(cpu, true);
 77	set_cpu_possible(cpu, true);
 78
 79	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 80}
 81
 82#ifdef CONFIG_HOTPLUG_CPU
 83void native_cpu_die(unsigned int cpu)
 84{
 85	unsigned int i;
 86
 87	for (i = 0; i < 10; i++) {
 88		smp_rmb();
 89		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
 90			if (system_state == SYSTEM_RUNNING)
 91				pr_info("CPU %u is now offline\n", cpu);
 92
 93			return;
 94		}
 95
 96		msleep(100);
 97	}
 98
 99	pr_err("CPU %u didn't die...\n", cpu);
100}
101
102int native_cpu_disable(unsigned int cpu)
103{
104	return cpu == 0 ? -EPERM : 0;
105}
106
107void play_dead_common(void)
108{
109	idle_task_exit();
110	irq_ctx_exit(raw_smp_processor_id());
111	mb();
112
113	__this_cpu_write(cpu_state, CPU_DEAD);
114	local_irq_disable();
115}
116
117void native_play_dead(void)
118{
119	play_dead_common();
120}
121
122int __cpu_disable(void)
123{
124	unsigned int cpu = smp_processor_id();
125	int ret;
126
127	ret = mp_ops->cpu_disable(cpu);
128	if (ret)
129		return ret;
130
131	/*
132	 * Take this CPU offline.  Once we clear this, we can't return,
133	 * and we must not schedule until we're ready to give up the cpu.
134	 */
135	set_cpu_online(cpu, false);
136
137	/*
138	 * OK - migrate IRQs away from this CPU
139	 */
140	migrate_irqs();
141
142	/*
143	 * Flush user cache and TLB mappings, and then remove this CPU
144	 * from the vm mask set of all processes.
145	 */
146	flush_cache_all();
147#ifdef CONFIG_MMU
148	local_flush_tlb_all();
149#endif
150
151	clear_tasks_mm_cpumask(cpu);
152
153	return 0;
154}
155#else /* ... !CONFIG_HOTPLUG_CPU */
156int native_cpu_disable(unsigned int cpu)
157{
158	return -ENOSYS;
159}
160
161void native_cpu_die(unsigned int cpu)
162{
163	/* We said "no" in __cpu_disable */
164	BUG();
165}
166
167void native_play_dead(void)
168{
169	BUG();
170}
171#endif
172
173asmlinkage void start_secondary(void)
174{
175	unsigned int cpu = smp_processor_id();
176	struct mm_struct *mm = &init_mm;
177
178	enable_mmu();
179	mmgrab(mm);
180	mmget(mm);
181	current->active_mm = mm;
182#ifdef CONFIG_MMU
183	enter_lazy_tlb(mm, current);
184	local_flush_tlb_all();
185#endif
186
187	per_cpu_trap_init();
188
189	preempt_disable();
190
191	notify_cpu_starting(cpu);
192
193	local_irq_enable();
194
195	calibrate_delay();
196
197	smp_store_cpu_info(cpu);
198
199	set_cpu_online(cpu, true);
200	per_cpu(cpu_state, cpu) = CPU_ONLINE;
201
202	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
203}
204
205extern struct {
206	unsigned long sp;
207	unsigned long bss_start;
208	unsigned long bss_end;
209	void *start_kernel_fn;
210	void *cpu_init_fn;
211	void *thread_info;
212} stack_start;
213
214int __cpu_up(unsigned int cpu, struct task_struct *tsk)
215{
216	unsigned long timeout;
217
218	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
219
220	/* Fill in data in head.S for secondary cpus */
221	stack_start.sp = tsk->thread.sp;
222	stack_start.thread_info = tsk->stack;
223	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
224	stack_start.start_kernel_fn = start_secondary;
225
226	flush_icache_range((unsigned long)&stack_start,
227			   (unsigned long)&stack_start + sizeof(stack_start));
228	wmb();
229
230	mp_ops->start_cpu(cpu, (unsigned long)_stext);
231
232	timeout = jiffies + HZ;
233	while (time_before(jiffies, timeout)) {
234		if (cpu_online(cpu))
235			break;
236
237		udelay(10);
238		barrier();
239	}
240
241	if (cpu_online(cpu))
242		return 0;
243
244	return -ENOENT;
245}
246
247void __init smp_cpus_done(unsigned int max_cpus)
248{
249	unsigned long bogosum = 0;
250	int cpu;
251
252	for_each_online_cpu(cpu)
253		bogosum += cpu_data[cpu].loops_per_jiffy;
254
255	printk(KERN_INFO "SMP: Total of %d processors activated "
256	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
257	       bogosum / (500000/HZ),
258	       (bogosum / (5000/HZ)) % 100);
259}
260
261void smp_send_reschedule(int cpu)
262{
263	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
264}
265
266void smp_send_stop(void)
267{
268	smp_call_function(stop_this_cpu, 0, 0);
269}
270
271void arch_send_call_function_ipi_mask(const struct cpumask *mask)
272{
273	int cpu;
274
275	for_each_cpu(cpu, mask)
276		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
277}
278
279void arch_send_call_function_single_ipi(int cpu)
280{
281	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
282}
283
284#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
285void tick_broadcast(const struct cpumask *mask)
286{
287	int cpu;
288
289	for_each_cpu(cpu, mask)
290		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
291}
292
293static void ipi_timer(void)
294{
295	irq_enter();
296	tick_receive_broadcast();
297	irq_exit();
298}
299#endif
300
301void smp_message_recv(unsigned int msg)
302{
303	switch (msg) {
304	case SMP_MSG_FUNCTION:
305		generic_smp_call_function_interrupt();
306		break;
307	case SMP_MSG_RESCHEDULE:
308		scheduler_ipi();
309		break;
310	case SMP_MSG_FUNCTION_SINGLE:
311		generic_smp_call_function_single_interrupt();
312		break;
313#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
314	case SMP_MSG_TIMER:
315		ipi_timer();
316		break;
317#endif
318	default:
319		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
320		       smp_processor_id(), __func__, msg);
321		break;
322	}
323}
324
325/* Not really SMP stuff ... */
326int setup_profiling_timer(unsigned int multiplier)
327{
328	return 0;
329}
330
331#ifdef CONFIG_MMU
332
333static void flush_tlb_all_ipi(void *info)
334{
335	local_flush_tlb_all();
336}
337
338void flush_tlb_all(void)
339{
340	on_each_cpu(flush_tlb_all_ipi, 0, 1);
341}
342
343static void flush_tlb_mm_ipi(void *mm)
344{
345	local_flush_tlb_mm((struct mm_struct *)mm);
346}
347
348/*
349 * The following tlb flush calls are invoked when old translations are
350 * being torn down, or pte attributes are changing. For single threaded
351 * address spaces, a new context is obtained on the current cpu, and tlb
352 * context on other cpus are invalidated to force a new context allocation
353 * at switch_mm time, should the mm ever be used on other cpus. For
354 * multithreaded address spaces, intercpu interrupts have to be sent.
355 * Another case where intercpu interrupts are required is when the target
356 * mm might be active on another cpu (eg debuggers doing the flushes on
 357 * behalf of debuggees, kswapd stealing pages from another process etc).
358 * Kanoj 07/00.
359 */
360void flush_tlb_mm(struct mm_struct *mm)
361{
362	preempt_disable();
363
364	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
365		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
366	} else {
367		int i;
368		for_each_online_cpu(i)
369			if (smp_processor_id() != i)
370				cpu_context(i, mm) = 0;
371	}
372	local_flush_tlb_mm(mm);
373
374	preempt_enable();
375}
376
377struct flush_tlb_data {
378	struct vm_area_struct *vma;
379	unsigned long addr1;
380	unsigned long addr2;
381};
382
383static void flush_tlb_range_ipi(void *info)
384{
385	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
386
387	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
388}
389
390void flush_tlb_range(struct vm_area_struct *vma,
391		     unsigned long start, unsigned long end)
392{
393	struct mm_struct *mm = vma->vm_mm;
394
395	preempt_disable();
396	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
397		struct flush_tlb_data fd;
398
399		fd.vma = vma;
400		fd.addr1 = start;
401		fd.addr2 = end;
402		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
403	} else {
404		int i;
405		for_each_online_cpu(i)
406			if (smp_processor_id() != i)
407				cpu_context(i, mm) = 0;
408	}
409	local_flush_tlb_range(vma, start, end);
410	preempt_enable();
411}
412
413static void flush_tlb_kernel_range_ipi(void *info)
414{
415	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
416
417	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
418}
419
420void flush_tlb_kernel_range(unsigned long start, unsigned long end)
421{
422	struct flush_tlb_data fd;
423
424	fd.addr1 = start;
425	fd.addr2 = end;
426	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
427}
428
429static void flush_tlb_page_ipi(void *info)
430{
431	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
432
433	local_flush_tlb_page(fd->vma, fd->addr1);
434}
435
436void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
437{
438	preempt_disable();
439	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
440	    (current->mm != vma->vm_mm)) {
441		struct flush_tlb_data fd;
442
443		fd.vma = vma;
444		fd.addr1 = page;
445		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
446	} else {
447		int i;
448		for_each_online_cpu(i)
449			if (smp_processor_id() != i)
450				cpu_context(i, vma->vm_mm) = 0;
451	}
452	local_flush_tlb_page(vma, page);
453	preempt_enable();
454}
455
456static void flush_tlb_one_ipi(void *info)
457{
458	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
459	local_flush_tlb_one(fd->addr1, fd->addr2);
460}
461
462void flush_tlb_one(unsigned long asid, unsigned long vaddr)
463{
464	struct flush_tlb_data fd;
465
466	fd.addr1 = asid;
467	fd.addr2 = vaddr;
468
469	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
470	local_flush_tlb_one(asid, vaddr);
471}
472
473#endif