/*
 * linux/arch/m32r/kernel/smp.c
 *
 * M32R SMP support routines.
 *
 * Copyright (c) 2001, 2002 Hitoshi Yamamoto
 *
 * Taken from i386 version.
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/tlbflush.h>

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff
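
/*
 * TLB shootdown handshake (see flush_tlb_others() and
 * smp_invalidate_interrupt() below): the initiating CPU publishes the
 * flush parameters in flush_mm/flush_vma/flush_va under tlbstate_lock,
 * ORs the target CPUs into flush_cpumask and sends INVALIDATE_TLB_IPI.
 * Each target flushes its own TLB and then clears its bit in
 * flush_cpumask, which is what the initiator busy-waits on before
 * dropping the lock.
 */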

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_reschedule_interrupt(void);
void smp_flush_cache_all_interrupt(void);

static void flush_tlb_all_ipi(void *);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
	struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

static void stop_this_cpu(void *);

void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_reschedule
 *
 * Description: This routine requests another CPU to execute rescheduling.
 *      1. Send 'RESCHEDULE_IPI' to the target CPU.
 *         Request the target CPU to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpu_id - Target CPU ID
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}

/*==========================================================================*
 * Name: smp_reschedule_interrupt
 *
 * Description: This routine executes on the CPU which received
 *      'RESCHEDULE_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
	scheduler_ipi();
}

/*==========================================================================*
 * Name: smp_flush_cache_all
 *
 * Description: This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *      CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
EXPORT_SYMBOL(smp_flush_cache_all);

void smp_flush_cache_all_interrupt(void)
{
	_flush_cache_copyback_all();
	clear_bit(smp_processor_id(), &flushcache_cpumask);
}
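
/*
 * The cache-flush handshake mirrors the TLB one: smp_flush_cache_all()
 * ORs the target CPUs into flushcache_cpumask, sends INVALIDATE_CACHE_IPI
 * and busy-waits; each receiver writes back and invalidates its own cache
 * in smp_flush_cache_all_interrupt() and then clears its bit, which acts
 * as the acknowledgement.
 */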

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_flush_tlb_all
 *
 * Description: This routine flushes all processes' TLBs.
 *      1. Flush the local TLB.
 *      2. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
	smp_call_function(flush_tlb_all_ipi, NULL, 1);
	preempt_enable();
}

/*==========================================================================*
 * Name: flush_tlb_all_ipi
 *
 * Description: This routine flushes the local TLB.
 *      1. Execute '__flush_tlb_all()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *info - not used
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
	__flush_tlb_all();
}

/*==========================================================================*
 * Name: smp_flush_tlb_mm
 *
 * Description: This routine flushes the specified mm context's TLB entries.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *mm - a pointer to the mm struct whose TLB entries are flushed
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		*mmc = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

	preempt_enable();
}
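
/*
 * Each mm keeps one MMU context (ASID) per CPU in mm->context[].  Setting
 * it to NO_CONTEXT above invalidates that ASID: if the mm is the current
 * one, the CPU immediately picks up a fresh context via activate_context();
 * otherwise the CPU drops itself from mm_cpumask() and will be assigned a
 * new ASID the next time it switches to this mm.
 */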

/*==========================================================================*
 * Name: smp_flush_tlb_range
 *
 * Description: This routine flushes a range of pages (currently by
 *      flushing the whole mm).
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *vma - a pointer to the vma struct whose mm is flushed
 *      start - not used
 *      end - not used
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	smp_flush_tlb_mm(vma->vm_mm);
}

/*==========================================================================*
 * Name: smp_flush_tlb_page
 *
 * Description: This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *vma - a pointer to the vma struct that includes va
 *      va - virtual address for flush TLB
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

#ifdef DEBUG_SMP
	if (!mm)
		BUG();
#endif

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		va &= PAGE_MASK;
		va |= (*mmc & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(va);
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, vma, va);

	preempt_enable();
}
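
/*
 * The TLB entries are tagged with the ASID, so a single-page flush hands
 * __flush_tlb_page() the page-aligned VA with the mm's ASID merged into
 * the low bits.  Illustrative example only, assuming 4KB pages and an
 * 8-bit MMU_CONTEXT_ASID_MASK (0xff):
 *
 *	va  = 0x00402345;
 *	va &= PAGE_MASK;			-> 0x00402000
 *	va |= (*mmc & MMU_CONTEXT_ASID_MASK);	-> 0x00402042 for ASID 0x42
 *	__flush_tlb_page(va);
 */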

/*==========================================================================*
 * Name: flush_tlb_others
 *
 * Description: This routine requests other CPUs to execute a TLB flush.
 *      1. Set up parameters.
 *      2. Send 'INVALIDATE_TLB_IPI' to the other CPUs.
 *         Request them to execute 'smp_invalidate_interrupt()'.
 *      3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpumask - bitmap of target CPUs
 *      *mm - a pointer to the mm struct whose TLB entries are flushed
 *      *vma - a pointer to the vma struct that includes va
 *      va - virtual address for flush TLB
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty(&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

/*==========================================================================*
 * Name: smp_invalidate_interrupt
 *
 * Description: This routine executes on the CPU which received
 *      'INVALIDATE_TLB_IPI'.
 *      1. Flush the local TLB.
 *      2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	unsigned long *mmc = &flush_mm->context[cpu_id];

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_stop
 *
 * Description: This routine requests all other CPUs to stop.
 *      1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/*==========================================================================*
 * Name: stop_this_cpu
 *
 * Description: This routine halts the CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
	int cpu_id = smp_processor_id();

	/*
	 * Remove this CPU:
	 */
	set_cpu_online(cpu_id, false);

	/*
	 * PSW IE = 1;
	 * IMASK = 0;
	 * goto SLEEP
	 */
	local_irq_disable();
	outl(0, M32R_ICU_IMASK_PORTL);
	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
	local_irq_enable();

	for ( ; ; );
}
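
/*
 * Note that the CPU is not actually put to sleep: all ICU interrupt
 * sources are masked (IMASK = 0), interrupts are re-enabled at the PSW
 * level, and the CPU is then parked in the infinite loop above.
 */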

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/*==========================================================================*
 * Name: smp_call_function_interrupt
 *
 * Description: This routine executes on the CPU which received
 *      'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_timer
 *
 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *      in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}

/*==========================================================================*
 * Name: smp_ipi_timer_interrupt
 *
 * Description: This routine executes on the CPU which received
 *      'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *regs - a pointer to the saved register info
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}

/*==========================================================================*
 * Name: smp_local_timer_interrupt
 *
 * Description: Local timer interrupt handler. It does both profiling and
 *      process statistics/rescheduling.
 *      We do profiling in every local tick; statistics/rescheduling
 *      happen only every 'profiling multiplier' ticks. The default
 *      multiplier is 1 and it can be changed by writing the new
 *      multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Original: arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
	int user = user_mode(get_irq_regs());
	int cpu_id = smp_processor_id();

	/*
	 * The profiling function is SMP safe. (nothing can mess
	 * around with "current", and the profiling counters are
	 * updated with atomic operations). This is especially
	 * useful with a profiling multiplier != 1
	 */

	profile_tick(CPU_PROFILING);

	if (--per_cpu(prof_counter, cpu_id) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu_id)
			= per_cpu(prof_multiplier, cpu_id);
		if (per_cpu(prof_counter, cpu_id)
		    != per_cpu(prof_old_multiplier, cpu_id))
		{
			per_cpu(prof_old_multiplier, cpu_id)
				= per_cpu(prof_counter, cpu_id);
		}

		update_process_times(user);
	}
}
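
/*
 * Worked example: with prof_multiplier == 4, prof_counter is reloaded to 4
 * and then counts 3, 2, 1, 0 on successive local ticks; profile_tick()
 * runs on every tick, but update_process_times() only runs on the tick
 * where the counter reaches 0, i.e. once every 4 local timer interrupts.
 */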

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: send_IPI_allbutself
 *
 * Description: This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: ipi_num - Number of IPI
 *      try - 0 : Send the IPI unconditionally (wait until any previous
 *                IPI has been handled by the target).
 *           !0 : Do not send the IPI if the target CPU has not yet
 *                handled the previous one.
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	send_IPI_mask(&cpumask, ipi_num, try);
}

/*==========================================================================*
 * Name: send_IPI_mask
 *
 * Description: This routine sends an IPI to the specified CPUs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpumask - Bitmap of target CPUs' logical IDs
 *      ipi_num - Number of IPI
 *      try - 0 : Send the IPI unconditionally (wait until any previous
 *                IPI has been handled by the target).
 *           !0 : Do not send the IPI if the target CPU has not yet
 *                handled the previous one.
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
	cpumask_t physid_mask, tmp;
	int cpu_id, phys_id;
	int num_cpus = num_online_cpus();

	if (num_cpus <= 1)	/* NO MP */
		return;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(cpumask, &tmp));

	cpumask_clear(&physid_mask);
	for_each_cpu(cpu_id, cpumask) {
		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
			cpumask_set_cpu(phys_id, &physid_mask);
	}

	send_IPI_mask_phys(&physid_mask, ipi_num, try);
}

/*==========================================================================*
 * Name: send_IPI_mask_phys
 *
 * Description: This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: physid_mask - Bitmap of target CPUs' physical IDs
 *      ipi_num - Number of IPI
 *      try - 0 : Send the IPI unconditionally (wait until any previous
 *                IPI has been handled by the target).
 *           !0 : Do not send the IPI if the target CPU has not yet
 *                handled the previous one.
 *
 * Returns: IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
	int try)
{
	spinlock_t *ipilock;
	volatile unsigned long *ipicr_addr;
	unsigned long ipicr_val;
	unsigned long my_physid_mask;
	unsigned long mask = cpumask_bits(physid_mask)[0];

	if (mask & ~physids_coerce(phys_cpu_present_map))
		BUG();
	if (ipi_num >= NR_IPIS || ipi_num < 0)
		BUG();

	mask <<= IPI_SHIFT;
	ipilock = &ipi_lock[ipi_num];
	ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
		+ (ipi_num << 2));
	my_physid_mask = ~(1 << smp_processor_id());

	/*
	 * lock ipi_lock[i]
	 * check IPICRi == 0
	 * write IPICRi (send IPIi)
	 * unlock ipi_lock[i]
	 */
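	/*
	 * Roughly equivalent C for the asm sequence below (illustrative
	 * only; the real code relies on the exact load/branch/store
	 * ordering of the asm):
	 *
	 *	do {
	 *		ipicr_val = *ipicr_addr;	 // ld   %0, @%1
	 *		ipicr_val &= my_physid_mask;	 // and  %0, %4
	 *		if (ipicr_val == 0) {		 // beqz %0, 2f
	 *			*ipicr_addr = mask;	 // st   %2, @%1 (send IPIi)
	 *			break;
	 *		}
	 *	} while (!try);			 // bnez %3, 3f / bra 1b
	 */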
	spin_lock(ipilock);
	__asm__ __volatile__ (
		";; CHECK IPICRi == 0		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"ld	%0, @%1			\n\t"
		"and	%0, %4			\n\t"
		"beqz	%0, 2f			\n\t"
		"bnez	%3, 3f			\n\t"
		"bra	1b			\n\t"
		";; WRITE IPICRi (send IPIi)	\n\t"
		".fillinsn			\n"
		"2:				\n\t"
		"st	%2, @%1			\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		: "=&r"(ipicr_val)
		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
		: "memory"
	);
	spin_unlock(ipilock);

	return ipicr_val;
}