1/*
2 * Generic helpers for smp ipi calls
3 *
4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
5 */
6#include <linux/rcupdate.h>
7#include <linux/rculist.h>
8#include <linux/kernel.h>
9#include <linux/export.h>
10#include <linux/percpu.h>
11#include <linux/init.h>
12#include <linux/gfp.h>
13#include <linux/smp.h>
14#include <linux/cpu.h>
15
16#include "smpboot.h"
17
18#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
19static struct {
20 struct list_head queue;
21 raw_spinlock_t lock;
22} call_function __cacheline_aligned_in_smp =
23 {
24 .queue = LIST_HEAD_INIT(call_function.queue),
25 .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
26 };
27
28enum {
29 CSD_FLAG_LOCK = 0x01,
30};
31
32struct call_function_data {
33 struct call_single_data csd;
34 atomic_t refs;
35 cpumask_var_t cpumask;
36};
37
38static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
39
40struct call_single_queue {
41 struct list_head list;
42 raw_spinlock_t lock;
43};
44
45static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
46
47static int
48hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
49{
50 long cpu = (long)hcpu;
51 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
52
53 switch (action) {
54 case CPU_UP_PREPARE:
55 case CPU_UP_PREPARE_FROZEN:
56 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
57 cpu_to_node(cpu)))
58 return notifier_from_errno(-ENOMEM);
59 break;
60
61#ifdef CONFIG_HOTPLUG_CPU
62 case CPU_UP_CANCELED:
63 case CPU_UP_CANCELED_FROZEN:
64
65 case CPU_DEAD:
66 case CPU_DEAD_FROZEN:
67 free_cpumask_var(cfd->cpumask);
68 break;
69#endif
70 };
71
72 return NOTIFY_OK;
73}
74
75static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
76 .notifier_call = hotplug_cfd,
77};
78
79void __init call_function_init(void)
80{
81 void *cpu = (void *)(long)smp_processor_id();
82 int i;
83
84 for_each_possible_cpu(i) {
85 struct call_single_queue *q = &per_cpu(call_single_queue, i);
86
87 raw_spin_lock_init(&q->lock);
88 INIT_LIST_HEAD(&q->list);
89 }
90
91 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
92 register_cpu_notifier(&hotplug_cfd_notifier);
93}
94
95/*
96 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
97 *
98 * For non-synchronous ipi calls the csd can still be in use by the
99 * previous function call. For multi-cpu calls it's even more interesting
100 * as we'll have to ensure no other cpu is observing our csd.
101 */
102static void csd_lock_wait(struct call_single_data *data)
103{
104 while (data->flags & CSD_FLAG_LOCK)
105 cpu_relax();
106}
107
108static void csd_lock(struct call_single_data *data)
109{
110 csd_lock_wait(data);
111 data->flags = CSD_FLAG_LOCK;
112
113 /*
114 * prevent CPU from reordering the above assignment
115 * to ->flags with any subsequent assignments to other
116 * fields of the specified call_single_data structure:
117 */
118 smp_mb();
119}
120
121static void csd_unlock(struct call_single_data *data)
122{
123 WARN_ON(!(data->flags & CSD_FLAG_LOCK));
124
125 /*
126 * ensure we're all done before releasing data:
127 */
128 smp_mb();
129
130 data->flags &= ~CSD_FLAG_LOCK;
131}
132
133/*
134 * Insert a previously allocated call_single_data element
135 * for execution on the given CPU. data must already have
136 * ->func, ->info, and ->flags set.
137 */
138static
139void generic_exec_single(int cpu, struct call_single_data *data, int wait)
140{
141 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
142 unsigned long flags;
143 int ipi;
144
145 raw_spin_lock_irqsave(&dst->lock, flags);
146 ipi = list_empty(&dst->list);
147 list_add_tail(&data->list, &dst->list);
148 raw_spin_unlock_irqrestore(&dst->lock, flags);
149
150 /*
151 * The list addition should be visible before sending the IPI
152 * handler locks the list to pull the entry off it because of
153 * normal cache coherency rules implied by spinlocks.
154 *
155 * If IPIs can go out of order to the cache coherency protocol
156 * in an architecture, sufficient synchronisation should be added
157 * to arch code to make it appear to obey cache coherency WRT
158 * locking and barrier primitives. Generic code isn't really
159 * equipped to do the right thing...
160 */
161 if (ipi)
162 arch_send_call_function_single_ipi(cpu);
163
164 if (wait)
165 csd_lock_wait(data);
166}
167
168/*
169 * Invoked by arch to handle an IPI for call function. Must be called with
170 * interrupts disabled.
171 */
172void generic_smp_call_function_interrupt(void)
173{
174 struct call_function_data *data;
175 int cpu = smp_processor_id();
176
177 /*
178 * Shouldn't receive this interrupt on a cpu that is not yet online.
179 */
180 WARN_ON_ONCE(!cpu_online(cpu));
181
182 /*
183 * Ensure entry is visible on call_function_queue after we have
184 * entered the IPI. See comment in smp_call_function_many.
185 * If we don't have this, then we may miss an entry on the list
186 * and never get another IPI to process it.
187 */
188 smp_mb();
189
190 /*
191 * It's ok to use list_for_each_rcu() here even though we may
192 * delete 'pos', since list_del_rcu() doesn't clear ->next
193 */
194 list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
195 int refs;
196 smp_call_func_t func;
197
198 /*
199 * Since we walk the list without any locks, we might
200 * see an entry that was completed, removed from the
201 * list and is in the process of being reused.
202 *
203 * We must check that the cpu is in the cpumask before
204 * checking the refs, and both must be set before
205 * executing the callback on this cpu.
206 */
207
208 if (!cpumask_test_cpu(cpu, data->cpumask))
209 continue;
210
211 smp_rmb();
212
213 if (atomic_read(&data->refs) == 0)
214 continue;
215
216 func = data->csd.func; /* save for later warn */
217 func(data->csd.info);
218
219 /*
220 * If the cpu mask is not still set then func enabled
221 * interrupts (BUG), and this cpu took another smp call
222 * function interrupt and executed func(info) twice
223 * on this cpu. That nested execution decremented refs.
224 */
225 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
226 WARN(1, "%pf enabled interrupts and double executed\n", func);
227 continue;
228 }
229
230 refs = atomic_dec_return(&data->refs);
231 WARN_ON(refs < 0);
232
233 if (refs)
234 continue;
235
236 WARN_ON(!cpumask_empty(data->cpumask));
237
238 raw_spin_lock(&call_function.lock);
239 list_del_rcu(&data->csd.list);
240 raw_spin_unlock(&call_function.lock);
241
242 csd_unlock(&data->csd);
243 }
244
245}
246
247/*
248 * Invoked by arch to handle an IPI for call function single. Must be
249 * called from the arch with interrupts disabled.
250 */
251void generic_smp_call_function_single_interrupt(void)
252{
253 struct call_single_queue *q = &__get_cpu_var(call_single_queue);
254 unsigned int data_flags;
255 LIST_HEAD(list);
256
257 /*
258 * Shouldn't receive this interrupt on a cpu that is not yet online.
259 */
260 WARN_ON_ONCE(!cpu_online(smp_processor_id()));
261
262 raw_spin_lock(&q->lock);
263 list_replace_init(&q->list, &list);
264 raw_spin_unlock(&q->lock);
265
266 while (!list_empty(&list)) {
267 struct call_single_data *data;
268
269 data = list_entry(list.next, struct call_single_data, list);
270 list_del(&data->list);
271
272 /*
273 * 'data' can be invalid after this call if flags == 0
274 * (when called through generic_exec_single()),
275 * so save them away before making the call:
276 */
277 data_flags = data->flags;
278
279 data->func(data->info);
280
281 /*
282 * Unlocked CSDs are valid through generic_exec_single():
283 */
284 if (data_flags & CSD_FLAG_LOCK)
285 csd_unlock(data);
286 }
287}
288
289static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
290
291/*
292 * smp_call_function_single - Run a function on a specific CPU
293 * @func: The function to run. This must be fast and non-blocking.
294 * @info: An arbitrary pointer to pass to the function.
295 * @wait: If true, wait until function has completed on other CPUs.
296 *
297 * Returns 0 on success, else a negative status code.
298 */
299int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
300 int wait)
301{
302 struct call_single_data d = {
303 .flags = 0,
304 };
305 unsigned long flags;
306 int this_cpu;
307 int err = 0;
308
309 /*
310 * prevent preemption and reschedule on another processor,
311 * as well as CPU removal
312 */
313 this_cpu = get_cpu();
314
315 /*
316 * Can deadlock when called with interrupts disabled.
317 * We allow CPUs that are not yet online though, as no one else can
318 * send smp call function interrupt to this cpu and as such deadlocks
319 * can't happen.
320 */
321 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
322 && !oops_in_progress);
323
324 if (cpu == this_cpu) {
325 local_irq_save(flags);
326 func(info);
327 local_irq_restore(flags);
328 } else {
329 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
330 struct call_single_data *data = &d;
331
332 if (!wait)
333 data = &__get_cpu_var(csd_data);
334
335 csd_lock(data);
336
337 data->func = func;
338 data->info = info;
339 generic_exec_single(cpu, data, wait);
340 } else {
341 err = -ENXIO; /* CPU not online */
342 }
343 }
344
345 put_cpu();
346
347 return err;
348}
349EXPORT_SYMBOL(smp_call_function_single);
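
/*
 * Example (illustrative sketch; the demo_* helpers are hypothetical, not
 * kernel APIs): a minimal caller of smp_call_function_single().  The
 * callback runs in hard-IRQ context on the target CPU, so it must be
 * fast, non-blocking and must not sleep.
 */
static void demo_fetch_cpu_id(void *info)
{
	*(int *)info = smp_processor_id();
}

static int demo_query_cpu(int cpu)
{
	int answer = -1;
	int err;

	/* wait == 1: do not return until the callback has run on @cpu */
	err = smp_call_function_single(cpu, demo_fetch_cpu_id, &answer, 1);
	if (err)
		return err;	/* e.g. -ENXIO if @cpu is offline */

	return answer;
}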
350
351/*
352 * smp_call_function_any - Run a function on any of the given cpus
353 * @mask: The mask of cpus it can run on.
354 * @func: The function to run. This must be fast and non-blocking.
355 * @info: An arbitrary pointer to pass to the function.
356 * @wait: If true, wait until function has completed.
357 *
358 * Returns 0 on success, else a negative status code (if no cpus were online).
359 * Note that @wait will be implicitly turned on in case of allocation failures,
360 * since we fall back to on-stack allocation.
361 *
362 * Selection preference:
363 * 1) current cpu if in @mask
364 * 2) any cpu of current node if in @mask
365 * 3) any other online cpu in @mask
366 */
367int smp_call_function_any(const struct cpumask *mask,
368 smp_call_func_t func, void *info, int wait)
369{
370 unsigned int cpu;
371 const struct cpumask *nodemask;
372 int ret;
373
374 /* Try for same CPU (cheapest) */
375 cpu = get_cpu();
376 if (cpumask_test_cpu(cpu, mask))
377 goto call;
378
379 /* Try for same node. */
380 nodemask = cpumask_of_node(cpu_to_node(cpu));
381 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
382 cpu = cpumask_next_and(cpu, nodemask, mask)) {
383 if (cpu_online(cpu))
384 goto call;
385 }
386
387 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
388 cpu = cpumask_any_and(mask, cpu_online_mask);
389call:
390 ret = smp_call_function_single(cpu, func, info, wait);
391 put_cpu();
392 return ret;
393}
394EXPORT_SYMBOL_GPL(smp_call_function_any);
395
396/**
397 * __smp_call_function_single(): Run a function on a specific CPU
398 * @cpu: The CPU to run on.
399 * @data: Pre-allocated and setup data structure
400 * @wait: If true, wait until function has completed on specified CPU.
401 *
402 * Like smp_call_function_single(), but allow caller to pass in a
403 * pre-allocated data structure. Useful for embedding @data inside
404 * other structures, for instance.
405 */
406void __smp_call_function_single(int cpu, struct call_single_data *data,
407 int wait)
408{
409 unsigned int this_cpu;
410 unsigned long flags;
411
412 this_cpu = get_cpu();
413 /*
414 * Can deadlock when called with interrupts disabled.
415 * We allow CPUs that are not yet online though, as no one else can
416 * send smp call function interrupt to this cpu and as such deadlocks
417 * can't happen.
418 */
419 WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
420 && !oops_in_progress);
421
422 if (cpu == this_cpu) {
423 local_irq_save(flags);
424 data->func(data->info);
425 local_irq_restore(flags);
426 } else {
427 csd_lock(data);
428 generic_exec_single(cpu, data, wait);
429 }
430 put_cpu();
431}
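
/*
 * Example (illustrative sketch; struct demo_remote_work and its helpers
 * are hypothetical): embedding the call_single_data element inside a
 * longer-lived object, as suggested above.  With wait == 0 the csd (and
 * thus the containing object) must stay valid until the callback has run
 * and the csd has been unlocked again.
 */
struct demo_remote_work {
	struct call_single_data csd;
	unsigned long payload;
};

static void demo_remote_fn(void *info)
{
	struct demo_remote_work *w = info;

	/* runs in hard-IRQ context on the target CPU */
	pr_info("payload %lu handled on CPU %d\n",
		w->payload, smp_processor_id());
}

static void demo_remote_work_init(struct demo_remote_work *w)
{
	w->csd.flags = 0;
	w->csd.func = demo_remote_fn;
	w->csd.info = w;
}

static void demo_kick_remote(struct demo_remote_work *w, int cpu)
{
	/* csd_lock() inside waits for any previous, still-pending call */
	__smp_call_function_single(cpu, &w->csd, 0);
}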
432
433/**
434 * smp_call_function_many(): Run a function on a set of other CPUs.
435 * @mask: The set of cpus to run on (only runs on online subset).
436 * @func: The function to run. This must be fast and non-blocking.
437 * @info: An arbitrary pointer to pass to the function.
438 * @wait: If true, wait (atomically) until function has completed
439 * on other CPUs.
440 *
441 * If @wait is true, then returns once @func has returned.
442 *
443 * You must not call this function with disabled interrupts or from a
444 * hardware interrupt handler or from a bottom half handler. Preemption
445 * must be disabled when calling this function.
446 */
447void smp_call_function_many(const struct cpumask *mask,
448 smp_call_func_t func, void *info, bool wait)
449{
450 struct call_function_data *data;
451 unsigned long flags;
452 int refs, cpu, next_cpu, this_cpu = smp_processor_id();
453
454 /*
455 * Can deadlock when called with interrupts disabled.
456 * We allow CPUs that are not yet online though, as no one else can
457 * send smp call function interrupt to this cpu and as such deadlocks
458 * can't happen.
459 */
460 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
461 && !oops_in_progress && !early_boot_irqs_disabled);
462
463 /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
464 cpu = cpumask_first_and(mask, cpu_online_mask);
465 if (cpu == this_cpu)
466 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
467
468 /* No online cpus? We're done. */
469 if (cpu >= nr_cpu_ids)
470 return;
471
472 /* Do we have another CPU which isn't us? */
473 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
474 if (next_cpu == this_cpu)
475 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
476
477 /* Fastpath: do that cpu by itself. */
478 if (next_cpu >= nr_cpu_ids) {
479 smp_call_function_single(cpu, func, info, wait);
480 return;
481 }
482
483 data = &__get_cpu_var(cfd_data);
484 csd_lock(&data->csd);
485
486 /* This BUG_ON verifies our reuse assertions and can be removed */
487 BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
488
489 /*
490 * The global call function queue list add and delete are protected
491 * by a lock, but the list is traversed without any lock, relying
492 * on the rcu list add and delete to allow safe concurrent traversal.
493 * We reuse the call function data without waiting for any grace
494 * period after some other cpu removes it from the global queue.
495 * This means a cpu might find our data block as it is being
496 * filled out.
497 *
498 * We hold off the interrupt handler on the other cpu by
499 * ordering our writes to the cpu mask vs our setting of the
500 * refs counter. We assert only the cpu owning the data block
501 * will set a bit in cpumask, and each bit will only be cleared
502 * by the subject cpu. Each cpu must first find its bit is
503 * set and then check that refs is set indicating the element is
504 * ready to be processed, otherwise it must skip the entry.
505 *
506 * On the previous iteration refs was set to 0 by another cpu.
507 * To avoid the use of transitivity, set the counter to 0 here
508 * so the wmb will pair with the rmb in the interrupt handler.
509 */
510 atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */
511
512 data->csd.func = func;
513 data->csd.info = info;
514
515 /* Ensure 0 refs is visible before mask. Also orders func and info */
516 smp_wmb();
517
518 /* We rely on the "and" being processed before the store */
519 cpumask_and(data->cpumask, mask, cpu_online_mask);
520 cpumask_clear_cpu(this_cpu, data->cpumask);
521 refs = cpumask_weight(data->cpumask);
522
523 /* Some callers race with other cpus changing the passed mask */
524 if (unlikely(!refs)) {
525 csd_unlock(&data->csd);
526 return;
527 }
528
529 raw_spin_lock_irqsave(&call_function.lock, flags);
530 /*
531 * Place entry at the _HEAD_ of the list, so that any cpu still
532 * observing the entry in generic_smp_call_function_interrupt()
533 * will not miss any other list entries:
534 */
535 list_add_rcu(&data->csd.list, &call_function.queue);
536 /*
537 * We rely on the wmb() in list_add_rcu to complete our writes
538 * to the cpumask before this write to refs, which indicates
539 * data is on the list and is ready to be processed.
540 */
541 atomic_set(&data->refs, refs);
542 raw_spin_unlock_irqrestore(&call_function.lock, flags);
543
544 /*
545 * Make the list addition visible before sending the ipi.
546 * (IPIs must obey or appear to obey normal Linux cache
547 * coherency rules -- see comment in generic_exec_single).
548 */
549 smp_mb();
550
551 /* Send a message to all CPUs in the map */
552 arch_send_call_function_ipi_mask(data->cpumask);
553
554 /* Optionally wait for the CPUs to complete */
555 if (wait)
556 csd_lock_wait(&data->csd);
557}
558EXPORT_SYMBOL(smp_call_function_many);
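
/*
 * Example (illustrative sketch; demo_counter and the helpers are
 * hypothetical): driving smp_call_function_many() from process context.
 * Preemption must stay disabled across the call, and the local CPU is
 * deliberately excluded by the implementation, so it is handled
 * separately here (on_each_cpu_mask() wraps the same pattern).
 */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_reset_counter(void *unused)
{
	__this_cpu_write(demo_counter, 0);
}

static void demo_reset_counters(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, demo_reset_counter, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		demo_reset_counter(NULL);
	preempt_enable();
}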
559
560/**
561 * smp_call_function(): Run a function on all other CPUs.
562 * @func: The function to run. This must be fast and non-blocking.
563 * @info: An arbitrary pointer to pass to the function.
564 * @wait: If true, wait (atomically) until function has completed
565 * on other CPUs.
566 *
567 * Returns 0.
568 *
569 * If @wait is true, then returns once @func has returned; otherwise
570 * it returns just before the target cpu calls @func.
571 *
572 * You must not call this function with disabled interrupts or from a
573 * hardware interrupt handler or from a bottom half handler.
574 */
575int smp_call_function(smp_call_func_t func, void *info, int wait)
576{
577 preempt_disable();
578 smp_call_function_many(cpu_online_mask, func, info, wait);
579 preempt_enable();
580
581 return 0;
582}
583EXPORT_SYMBOL(smp_call_function);
584
585void ipi_call_lock(void)
586{
587 raw_spin_lock(&call_function.lock);
588}
589
590void ipi_call_unlock(void)
591{
592 raw_spin_unlock(&call_function.lock);
593}
594
595void ipi_call_lock_irq(void)
596{
597 raw_spin_lock_irq(&call_function.lock);
598}
599
600void ipi_call_unlock_irq(void)
601{
602 raw_spin_unlock_irq(&call_function.lock);
603}
604#endif /* USE_GENERIC_SMP_HELPERS */
605
606/* Setup configured maximum number of CPUs to activate */
607unsigned int setup_max_cpus = NR_CPUS;
608EXPORT_SYMBOL(setup_max_cpus);
609
610
611/*
612 * Setup routine for controlling SMP activation
613 *
614 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
615 * activation entirely (the MPS table probe still happens, though).
616 *
617 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
618 * greater than 0, limits the maximum number of CPUs activated in
619 * SMP mode to <NUM>.
620 */
621
622void __weak arch_disable_smp_support(void) { }
623
624static int __init nosmp(char *str)
625{
626 setup_max_cpus = 0;
627 arch_disable_smp_support();
628
629 return 0;
630}
631
632early_param("nosmp", nosmp);
633
634/* this is hard limit */
635static int __init nrcpus(char *str)
636{
637 int nr_cpus;
638
639 get_option(&str, &nr_cpus);
640 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
641 nr_cpu_ids = nr_cpus;
642
643 return 0;
644}
645
646early_param("nr_cpus", nrcpus);
647
648static int __init maxcpus(char *str)
649{
650 get_option(&str, &setup_max_cpus);
651 if (setup_max_cpus == 0)
652 arch_disable_smp_support();
653
654 return 0;
655}
656
657early_param("maxcpus", maxcpus);
658
659/* Setup number of possible processor ids */
660int nr_cpu_ids __read_mostly = NR_CPUS;
661EXPORT_SYMBOL(nr_cpu_ids);
662
663/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
664void __init setup_nr_cpu_ids(void)
665{
666 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
667}
668
669/* Called by boot processor to activate the rest. */
670void __init smp_init(void)
671{
672 unsigned int cpu;
673
674 idle_threads_init();
675
676 /* FIXME: This should be done in userspace --RR */
677 for_each_present_cpu(cpu) {
678 if (num_online_cpus() >= setup_max_cpus)
679 break;
680 if (!cpu_online(cpu))
681 cpu_up(cpu);
682 }
683
684 /* Any cleanup work */
685 printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
686 smp_cpus_done(setup_max_cpus);
687}
688
689/*
690 * Call a function on all processors. May be used during early boot while
691 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
692 * of local_irq_disable/enable().
693 */
694int on_each_cpu(void (*func) (void *info), void *info, int wait)
695{
696 unsigned long flags;
697 int ret = 0;
698
699 preempt_disable();
700 ret = smp_call_function(func, info, wait);
701 local_irq_save(flags);
702 func(info);
703 local_irq_restore(flags);
704 preempt_enable();
705 return ret;
706}
707EXPORT_SYMBOL(on_each_cpu);
708
709/**
710 * on_each_cpu_mask(): Run a function on processors specified by
711 * cpumask, which may include the local processor.
712 * @mask: The set of cpus to run on (only runs on online subset).
713 * @func: The function to run. This must be fast and non-blocking.
714 * @info: An arbitrary pointer to pass to the function.
715 * @wait: If true, wait (atomically) until function has completed
716 * on other CPUs.
717 *
718 * If @wait is true, then returns once @func has returned.
719 *
720 * You must not call this function with disabled interrupts or
721 * from a hardware interrupt handler or from a bottom half handler.
722 */
723void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
724 void *info, bool wait)
725{
726 int cpu = get_cpu();
727
728 smp_call_function_many(mask, func, info, wait);
729 if (cpumask_test_cpu(cpu, mask)) {
730 local_irq_disable();
731 func(info);
732 local_irq_enable();
733 }
734 put_cpu();
735}
736EXPORT_SYMBOL(on_each_cpu_mask);
737
738/*
739 * on_each_cpu_cond(): Call a function on each processor for which
740 * the supplied function cond_func returns true, optionally waiting
741 * for all the required CPUs to finish. This may include the local
742 * processor.
743 * @cond_func: A callback function that is passed a cpu id and
744 *		the info parameter. The function is called
745 *		with preemption disabled. The function should
746 *		return a boolean value indicating whether to IPI
747 * the specified CPU.
748 * @func: The function to run on all applicable CPUs.
749 * This must be fast and non-blocking.
750 * @info: An arbitrary pointer to pass to both functions.
751 * @wait: If true, wait (atomically) until function has
752 * completed on other CPUs.
753 * @gfp_flags: GFP flags to use when allocating the cpumask
754 * used internally by the function.
755 *
756 * The function might sleep if the GFP flags indicate a non-atomic
757 * allocation is allowed.
758 *
759 * Preemption is disabled to protect against CPUs going offline but not online.
760 * CPUs going online during the call will not be seen or sent an IPI.
761 *
762 * You must not call this function with disabled interrupts or
763 * from a hardware interrupt handler or from a bottom half handler.
764 */
765void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
766 smp_call_func_t func, void *info, bool wait,
767 gfp_t gfp_flags)
768{
769 cpumask_var_t cpus;
770 int cpu, ret;
771
772 might_sleep_if(gfp_flags & __GFP_WAIT);
773
774 if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
775 preempt_disable();
776 for_each_online_cpu(cpu)
777 if (cond_func(cpu, info))
778 cpumask_set_cpu(cpu, cpus);
779 on_each_cpu_mask(cpus, func, info, wait);
780 preempt_enable();
781 free_cpumask_var(cpus);
782 } else {
783 /*
784 * No free cpumask, bother. No matter, we'll
785 * just have to IPI them one by one.
786 */
787 preempt_disable();
788 for_each_online_cpu(cpu)
789 if (cond_func(cpu, info)) {
790 ret = smp_call_function_single(cpu, func,
791 info, wait);
792				WARN_ON_ONCE(ret);
793 }
794 preempt_enable();
795 }
796}
797EXPORT_SYMBOL(on_each_cpu_cond);
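
/*
 * Example (illustrative sketch; demo_pending and the helpers are
 * hypothetical): using on_each_cpu_cond() to IPI only those CPUs that
 * actually have work pending.
 */
static DEFINE_PER_CPU(unsigned long, demo_pending);

static bool demo_has_pending(int cpu, void *info)
{
	return per_cpu(demo_pending, cpu) != 0;
}

static void demo_flush_pending(void *info)
{
	__this_cpu_write(demo_pending, 0);
}

static void demo_flush_all_pending(void)
{
	/* GFP_KERNEL: may sleep while allocating the internal cpumask */
	on_each_cpu_cond(demo_has_pending, demo_flush_pending, NULL, true,
			 GFP_KERNEL);
}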
798
799static void do_nothing(void *unused)
800{
801}
802
803/**
804 * kick_all_cpus_sync - Force all cpus out of idle
805 *
806 * Used to synchronize the update of pm_idle function pointer. It's
807 * called after the pointer is updated and returns after the dummy
808 * callback function has been executed on all cpus. The execution of
809 * the function can only happen on the remote cpus after they have
810 * left the idle function which had been called via pm_idle function
811 * pointer. So it's guaranteed that nothing uses the previous pointer
812 * anymore.
813 */
814void kick_all_cpus_sync(void)
815{
816 /* Make sure the change is visible before we kick the cpus */
817 smp_mb();
818 smp_call_function(do_nothing, NULL, 1);
819}
820EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Generic helpers for smp ipi calls
4 *
5 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/irq_work.h>
11#include <linux/rcupdate.h>
12#include <linux/rculist.h>
13#include <linux/kernel.h>
14#include <linux/export.h>
15#include <linux/percpu.h>
16#include <linux/init.h>
17#include <linux/gfp.h>
18#include <linux/smp.h>
19#include <linux/cpu.h>
20#include <linux/sched.h>
21#include <linux/sched/idle.h>
22#include <linux/hypervisor.h>
23
24#include "smpboot.h"
25#include "sched/smp.h"
26
27#define CSD_TYPE(_csd) ((_csd)->flags & CSD_FLAG_TYPE_MASK)
28
29struct call_function_data {
30 call_single_data_t __percpu *csd;
31 cpumask_var_t cpumask;
32 cpumask_var_t cpumask_ipi;
33};
34
35static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
36
37static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
38
39static void flush_smp_call_function_queue(bool warn_cpu_offline);
40
41int smpcfd_prepare_cpu(unsigned int cpu)
42{
43 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
44
45 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
46 cpu_to_node(cpu)))
47 return -ENOMEM;
48 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
49 cpu_to_node(cpu))) {
50 free_cpumask_var(cfd->cpumask);
51 return -ENOMEM;
52 }
53 cfd->csd = alloc_percpu(call_single_data_t);
54 if (!cfd->csd) {
55 free_cpumask_var(cfd->cpumask);
56 free_cpumask_var(cfd->cpumask_ipi);
57 return -ENOMEM;
58 }
59
60 return 0;
61}
62
63int smpcfd_dead_cpu(unsigned int cpu)
64{
65 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
66
67 free_cpumask_var(cfd->cpumask);
68 free_cpumask_var(cfd->cpumask_ipi);
69 free_percpu(cfd->csd);
70 return 0;
71}
72
73int smpcfd_dying_cpu(unsigned int cpu)
74{
75 /*
76 * The IPIs for the smp-call-function callbacks queued by other
77 * CPUs might arrive late, either due to hardware latencies or
78 * because this CPU disabled interrupts (inside stop-machine)
79 * before the IPIs were sent. So flush out any pending callbacks
80 * explicitly (without waiting for the IPIs to arrive), to
81 * ensure that the outgoing CPU doesn't go offline with work
82 * still pending.
83 */
84 flush_smp_call_function_queue(false);
85 irq_work_run();
86 return 0;
87}
88
89void __init call_function_init(void)
90{
91 int i;
92
93 for_each_possible_cpu(i)
94 init_llist_head(&per_cpu(call_single_queue, i));
95
96 smpcfd_prepare_cpu(smp_processor_id());
97}
98
99/*
100 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
101 *
102 * For non-synchronous ipi calls the csd can still be in use by the
103 * previous function call. For multi-cpu calls it's even more interesting
104 * as we'll have to ensure no other cpu is observing our csd.
105 */
106static __always_inline void csd_lock_wait(call_single_data_t *csd)
107{
108 smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
109}
110
111static __always_inline void csd_lock(call_single_data_t *csd)
112{
113 csd_lock_wait(csd);
114 csd->flags |= CSD_FLAG_LOCK;
115
116 /*
117 * prevent CPU from reordering the above assignment
118 * to ->flags with any subsequent assignments to other
119 * fields of the specified call_single_data_t structure:
120 */
121 smp_wmb();
122}
123
124static __always_inline void csd_unlock(call_single_data_t *csd)
125{
126 WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
127
128 /*
129 * ensure we're all done before releasing data:
130 */
131 smp_store_release(&csd->flags, 0);
132}
133
134static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
135
136void __smp_call_single_queue(int cpu, struct llist_node *node)
137{
138 /*
139 * The list addition should be visible before sending the IPI
140 * handler locks the list to pull the entry off it because of
141 * normal cache coherency rules implied by spinlocks.
142 *
143 * If IPIs can go out of order to the cache coherency protocol
144 * in an architecture, sufficient synchronisation should be added
145 * to arch code to make it appear to obey cache coherency WRT
146 * locking and barrier primitives. Generic code isn't really
147 * equipped to do the right thing...
148 */
149 if (llist_add(node, &per_cpu(call_single_queue, cpu)))
150 send_call_function_single_ipi(cpu);
151}
152
153/*
154 * Insert a previously allocated call_single_data_t element
155 * for execution on the given CPU. data must already have
156 * ->func, ->info, and ->flags set.
157 */
158static int generic_exec_single(int cpu, call_single_data_t *csd)
159{
160 if (cpu == smp_processor_id()) {
161 smp_call_func_t func = csd->func;
162 void *info = csd->info;
163 unsigned long flags;
164
165 /*
166 * We can unlock early even for the synchronous on-stack case,
167 * since we're doing this from the same CPU..
168 */
169 csd_unlock(csd);
170 local_irq_save(flags);
171 func(info);
172 local_irq_restore(flags);
173 return 0;
174 }
175
176 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
177 csd_unlock(csd);
178 return -ENXIO;
179 }
180
181 __smp_call_single_queue(cpu, &csd->llist);
182
183 return 0;
184}
185
186/**
187 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
188 *
189 * Invoked by arch to handle an IPI for call function single.
190 * Must be called with interrupts disabled.
191 */
192void generic_smp_call_function_single_interrupt(void)
193{
194 flush_smp_call_function_queue(true);
195}
196
197/**
198 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
199 *
200 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
201 * offline CPU. Skip this check if set to 'false'.
202 *
203 * Flush any pending smp-call-function callbacks queued on this CPU. This is
204 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
205 * to ensure that all pending IPI callbacks are run before it goes completely
206 * offline.
207 *
208 * Loop through the call_single_queue and run all the queued callbacks.
209 * Must be called with interrupts disabled.
210 */
211static void flush_smp_call_function_queue(bool warn_cpu_offline)
212{
213 call_single_data_t *csd, *csd_next;
214 struct llist_node *entry, *prev;
215 struct llist_head *head;
216 static bool warned;
217
218 lockdep_assert_irqs_disabled();
219
220 head = this_cpu_ptr(&call_single_queue);
221 entry = llist_del_all(head);
222 entry = llist_reverse_order(entry);
223
224 /* There shouldn't be any pending callbacks on an offline CPU. */
225 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
226 !warned && !llist_empty(head))) {
227 warned = true;
228 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
229
230 /*
231 * We don't have to use the _safe() variant here
232 * because we are not invoking the IPI handlers yet.
233 */
234 llist_for_each_entry(csd, entry, llist) {
235 switch (CSD_TYPE(csd)) {
236 case CSD_TYPE_ASYNC:
237 case CSD_TYPE_SYNC:
238 case CSD_TYPE_IRQ_WORK:
239 pr_warn("IPI callback %pS sent to offline CPU\n",
240 csd->func);
241 break;
242
243 case CSD_TYPE_TTWU:
244 pr_warn("IPI task-wakeup sent to offline CPU\n");
245 break;
246
247 default:
248 pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
249 CSD_TYPE(csd));
250 break;
251 }
252 }
253 }
254
255 /*
256 * First; run all SYNC callbacks, people are waiting for us.
257 */
258 prev = NULL;
259 llist_for_each_entry_safe(csd, csd_next, entry, llist) {
260 /* Do we wait until *after* callback? */
261 if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
262 smp_call_func_t func = csd->func;
263 void *info = csd->info;
264
265 if (prev) {
266 prev->next = &csd_next->llist;
267 } else {
268 entry = &csd_next->llist;
269 }
270
271 func(info);
272 csd_unlock(csd);
273 } else {
274 prev = &csd->llist;
275 }
276 }
277
278 if (!entry)
279 return;
280
281 /*
282 * Second; run all !SYNC callbacks.
283 */
284 prev = NULL;
285 llist_for_each_entry_safe(csd, csd_next, entry, llist) {
286 int type = CSD_TYPE(csd);
287
288 if (type != CSD_TYPE_TTWU) {
289 if (prev) {
290 prev->next = &csd_next->llist;
291 } else {
292 entry = &csd_next->llist;
293 }
294
295 if (type == CSD_TYPE_ASYNC) {
296 smp_call_func_t func = csd->func;
297 void *info = csd->info;
298
299 csd_unlock(csd);
300 func(info);
301 } else if (type == CSD_TYPE_IRQ_WORK) {
302 irq_work_single(csd);
303 }
304
305 } else {
306 prev = &csd->llist;
307 }
308 }
309
310 /*
311 * Third; only CSD_TYPE_TTWU is left, issue those.
312 */
313 if (entry)
314 sched_ttwu_pending(entry);
315}
316
317void flush_smp_call_function_from_idle(void)
318{
319 unsigned long flags;
320
321 if (llist_empty(this_cpu_ptr(&call_single_queue)))
322 return;
323
324 local_irq_save(flags);
325 flush_smp_call_function_queue(true);
326 local_irq_restore(flags);
327}
328
329/*
330 * smp_call_function_single - Run a function on a specific CPU
331 * @func: The function to run. This must be fast and non-blocking.
332 * @info: An arbitrary pointer to pass to the function.
333 * @wait: If true, wait until function has completed on other CPUs.
334 *
335 * Returns 0 on success, else a negative status code.
336 */
337int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
338 int wait)
339{
340 call_single_data_t *csd;
341 call_single_data_t csd_stack = {
342 .flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
343 };
344 int this_cpu;
345 int err;
346
347 /*
348 * prevent preemption and reschedule on another processor,
349 * as well as CPU removal
350 */
351 this_cpu = get_cpu();
352
353 /*
354 * Can deadlock when called with interrupts disabled.
356 * We allow CPUs that are not yet online though, as no one else can
356 * send smp call function interrupt to this cpu and as such deadlocks
357 * can't happen.
358 */
359 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
360 && !oops_in_progress);
361
362 /*
363 * When @wait we can deadlock when we interrupt between llist_add() and
364 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
365 * csd_lock() because the interrupt context uses the same csd
366 * storage.
367 */
368 WARN_ON_ONCE(!in_task());
369
370 csd = &csd_stack;
371 if (!wait) {
372 csd = this_cpu_ptr(&csd_data);
373 csd_lock(csd);
374 }
375
376 csd->func = func;
377 csd->info = info;
378
379 err = generic_exec_single(cpu, csd);
380
381 if (wait)
382 csd_lock_wait(csd);
383
384 put_cpu();
385
386 return err;
387}
388EXPORT_SYMBOL(smp_call_function_single);
389
390/**
391 * smp_call_function_single_async(): Run an asynchronous function on a
392 * specific CPU.
393 * @cpu: The CPU to run on.
394 * @csd: Pre-allocated and setup data structure
395 *
396 * Like smp_call_function_single(), but the call is asynchronous and
397 * can thus be done from contexts with disabled interrupts.
398 *
399 * The caller passes its own pre-allocated data structure
400 * (i.e. embedded in an object) and is responsible for synchronizing it
401 * such that the IPIs performed on the @csd are strictly serialized.
402 *
403 * If the function is called with a csd which has not yet been
404 * processed by a previous call to smp_call_function_single_async(), the
405 * function will return immediately with -EBUSY, showing that the csd
406 * object is still in use.
407 *
408 * NOTE: Be careful, there is unfortunately no current debugging facility to
409 * validate the correctness of this serialization.
410 */
411int smp_call_function_single_async(int cpu, call_single_data_t *csd)
412{
413 int err = 0;
414
415 preempt_disable();
416
417 if (csd->flags & CSD_FLAG_LOCK) {
418 err = -EBUSY;
419 goto out;
420 }
421
422 csd->flags = CSD_FLAG_LOCK;
423 smp_wmb();
424
425 err = generic_exec_single(cpu, csd);
426
427out:
428 preempt_enable();
429
430 return err;
431}
432EXPORT_SYMBOL_GPL(smp_call_function_single_async);
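
/*
 * Example (illustrative sketch; struct demo_async_poke and its helpers
 * are hypothetical): a caller-owned csd driven by
 * smp_call_function_single_async().  -EBUSY only means the previous IPI
 * for this csd has not been processed yet; the caller is responsible for
 * serializing reuse of the csd.
 */
struct demo_async_poke {
	call_single_data_t csd;
	unsigned long payload;
};

static void demo_poke_fn(void *info)
{
	struct demo_async_poke *p = info;

	/* hard-IRQ context on the target CPU */
	pr_info("poked with payload %lu\n", p->payload);
}

static void demo_poke_init(struct demo_async_poke *p)
{
	p->csd.flags = 0;
	p->csd.func = demo_poke_fn;
	p->csd.info = p;
}

static int demo_poke_cpu(struct demo_async_poke *p, int cpu)
{
	/* may be called with interrupts disabled; never waits */
	return smp_call_function_single_async(cpu, &p->csd);
}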
433
434/*
435 * smp_call_function_any - Run a function on any of the given cpus
436 * @mask: The mask of cpus it can run on.
437 * @func: The function to run. This must be fast and non-blocking.
438 * @info: An arbitrary pointer to pass to the function.
439 * @wait: If true, wait until function has completed.
440 *
441 * Returns 0 on success, else a negative status code (if no cpus were online).
442 *
443 * Selection preference:
444 * 1) current cpu if in @mask
445 * 2) any cpu of current node if in @mask
446 * 3) any other online cpu in @mask
447 */
448int smp_call_function_any(const struct cpumask *mask,
449 smp_call_func_t func, void *info, int wait)
450{
451 unsigned int cpu;
452 const struct cpumask *nodemask;
453 int ret;
454
455 /* Try for same CPU (cheapest) */
456 cpu = get_cpu();
457 if (cpumask_test_cpu(cpu, mask))
458 goto call;
459
460 /* Try for same node. */
461 nodemask = cpumask_of_node(cpu_to_node(cpu));
462 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
463 cpu = cpumask_next_and(cpu, nodemask, mask)) {
464 if (cpu_online(cpu))
465 goto call;
466 }
467
468 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
469 cpu = cpumask_any_and(mask, cpu_online_mask);
470call:
471 ret = smp_call_function_single(cpu, func, info, wait);
472 put_cpu();
473 return ret;
474}
475EXPORT_SYMBOL_GPL(smp_call_function_any);
476
477static void smp_call_function_many_cond(const struct cpumask *mask,
478 smp_call_func_t func, void *info,
479 bool wait, smp_cond_func_t cond_func)
480{
481 struct call_function_data *cfd;
482 int cpu, next_cpu, this_cpu = smp_processor_id();
483
484 /*
485 * Can deadlock when called with interrupts disabled.
487 * We allow CPUs that are not yet online though, as no one else can
487 * send smp call function interrupt to this cpu and as such deadlocks
488 * can't happen.
489 */
490 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
491 && !oops_in_progress && !early_boot_irqs_disabled);
492
493 /*
494 * When @wait we can deadlock when we interrupt between llist_add() and
495 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
496 * csd_lock() because the interrupt context uses the same csd
497 * storage.
498 */
499 WARN_ON_ONCE(!in_task());
500
501 /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
502 cpu = cpumask_first_and(mask, cpu_online_mask);
503 if (cpu == this_cpu)
504 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
505
506 /* No online cpus? We're done. */
507 if (cpu >= nr_cpu_ids)
508 return;
509
510 /* Do we have another CPU which isn't us? */
511 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
512 if (next_cpu == this_cpu)
513 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
514
515 /* Fastpath: do that cpu by itself. */
516 if (next_cpu >= nr_cpu_ids) {
517 if (!cond_func || cond_func(cpu, info))
518 smp_call_function_single(cpu, func, info, wait);
519 return;
520 }
521
522 cfd = this_cpu_ptr(&cfd_data);
523
524 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
525 __cpumask_clear_cpu(this_cpu, cfd->cpumask);
526
527 /* Some callers race with other cpus changing the passed mask */
528 if (unlikely(!cpumask_weight(cfd->cpumask)))
529 return;
530
531 cpumask_clear(cfd->cpumask_ipi);
532 for_each_cpu(cpu, cfd->cpumask) {
533 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
534
535 if (cond_func && !cond_func(cpu, info))
536 continue;
537
538 csd_lock(csd);
539 if (wait)
540 csd->flags |= CSD_TYPE_SYNC;
541 csd->func = func;
542 csd->info = info;
543 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
544 __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
545 }
546
547 /* Send a message to all CPUs in the map */
548 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
549
550 if (wait) {
551 for_each_cpu(cpu, cfd->cpumask) {
552 call_single_data_t *csd;
553
554 csd = per_cpu_ptr(cfd->csd, cpu);
555 csd_lock_wait(csd);
556 }
557 }
558}
559
560/**
561 * smp_call_function_many(): Run a function on a set of other CPUs.
562 * @mask: The set of cpus to run on (only runs on online subset).
563 * @func: The function to run. This must be fast and non-blocking.
564 * @info: An arbitrary pointer to pass to the function.
565 * @wait: If true, wait (atomically) until function has completed
566 * on other CPUs.
567 *
568 * If @wait is true, then returns once @func has returned.
569 *
570 * You must not call this function with disabled interrupts or from a
571 * hardware interrupt handler or from a bottom half handler. Preemption
572 * must be disabled when calling this function.
573 */
574void smp_call_function_many(const struct cpumask *mask,
575 smp_call_func_t func, void *info, bool wait)
576{
577 smp_call_function_many_cond(mask, func, info, wait, NULL);
578}
579EXPORT_SYMBOL(smp_call_function_many);
580
581/**
582 * smp_call_function(): Run a function on all other CPUs.
583 * @func: The function to run. This must be fast and non-blocking.
584 * @info: An arbitrary pointer to pass to the function.
585 * @wait: If true, wait (atomically) until function has completed
586 * on other CPUs.
587 *
588 * Returns 0.
589 *
590 * If @wait is true, then returns once @func has returned; otherwise
591 * it returns just before the target cpu calls @func.
592 *
593 * You must not call this function with disabled interrupts or from a
594 * hardware interrupt handler or from a bottom half handler.
595 */
596void smp_call_function(smp_call_func_t func, void *info, int wait)
597{
598 preempt_disable();
599 smp_call_function_many(cpu_online_mask, func, info, wait);
600 preempt_enable();
601}
602EXPORT_SYMBOL(smp_call_function);
603
604/* Setup configured maximum number of CPUs to activate */
605unsigned int setup_max_cpus = NR_CPUS;
606EXPORT_SYMBOL(setup_max_cpus);
607
608
609/*
610 * Setup routine for controlling SMP activation
611 *
612 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
613 * activation entirely (the MPS table probe still happens, though).
614 *
615 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
616 * greater than 0, limits the maximum number of CPUs activated in
617 * SMP mode to <NUM>.
618 */
619
620void __weak arch_disable_smp_support(void) { }
621
622static int __init nosmp(char *str)
623{
624 setup_max_cpus = 0;
625 arch_disable_smp_support();
626
627 return 0;
628}
629
630early_param("nosmp", nosmp);
631
632/* this is hard limit */
633static int __init nrcpus(char *str)
634{
635 int nr_cpus;
636
637 if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
638 nr_cpu_ids = nr_cpus;
639
640 return 0;
641}
642
643early_param("nr_cpus", nrcpus);
644
645static int __init maxcpus(char *str)
646{
647 get_option(&str, &setup_max_cpus);
648 if (setup_max_cpus == 0)
649 arch_disable_smp_support();
650
651 return 0;
652}
653
654early_param("maxcpus", maxcpus);
655
656/* Setup number of possible processor ids */
657unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
658EXPORT_SYMBOL(nr_cpu_ids);
659
660/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
661void __init setup_nr_cpu_ids(void)
662{
663 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
664}
665
666/* Called by boot processor to activate the rest. */
667void __init smp_init(void)
668{
669 int num_nodes, num_cpus;
670
671 idle_threads_init();
672 cpuhp_threads_init();
673
674 pr_info("Bringing up secondary CPUs ...\n");
675
676 bringup_nonboot_cpus(setup_max_cpus);
677
678 num_nodes = num_online_nodes();
679 num_cpus = num_online_cpus();
680 pr_info("Brought up %d node%s, %d CPU%s\n",
681 num_nodes, (num_nodes > 1 ? "s" : ""),
682 num_cpus, (num_cpus > 1 ? "s" : ""));
683
684 /* Any cleanup work */
685 smp_cpus_done(setup_max_cpus);
686}
687
688/*
689 * Call a function on all processors. May be used during early boot while
690 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
691 * of local_irq_disable/enable().
692 */
693void on_each_cpu(smp_call_func_t func, void *info, int wait)
694{
695 unsigned long flags;
696
697 preempt_disable();
698 smp_call_function(func, info, wait);
699 local_irq_save(flags);
700 func(info);
701 local_irq_restore(flags);
702 preempt_enable();
703}
704EXPORT_SYMBOL(on_each_cpu);
705
706/**
707 * on_each_cpu_mask(): Run a function on processors specified by
708 * cpumask, which may include the local processor.
709 * @mask: The set of cpus to run on (only runs on online subset).
710 * @func: The function to run. This must be fast and non-blocking.
711 * @info: An arbitrary pointer to pass to the function.
712 * @wait: If true, wait (atomically) until function has completed
713 * on other CPUs.
714 *
715 * If @wait is true, then returns once @func has returned.
716 *
717 * You must not call this function with disabled interrupts or from a
718 * hardware interrupt handler or from a bottom half handler. The
719 * exception is that it may be used during early boot while
720 * early_boot_irqs_disabled is set.
721 */
722void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
723 void *info, bool wait)
724{
725 int cpu = get_cpu();
726
727 smp_call_function_many(mask, func, info, wait);
728 if (cpumask_test_cpu(cpu, mask)) {
729 unsigned long flags;
730 local_irq_save(flags);
731 func(info);
732 local_irq_restore(flags);
733 }
734 put_cpu();
735}
736EXPORT_SYMBOL(on_each_cpu_mask);
737
738/*
739 * on_each_cpu_cond(): Call a function on each processor for which
740 * the supplied function cond_func returns true, optionally waiting
741 * for all the required CPUs to finish. This may include the local
742 * processor.
743 * @cond_func: A callback function that is passed a cpu id and
744 *		the info parameter. The function is called
745 *		with preemption disabled. The function should
746 *		return a boolean value indicating whether to IPI
747 * the specified CPU.
748 * @func: The function to run on all applicable CPUs.
749 * This must be fast and non-blocking.
750 * @info: An arbitrary pointer to pass to both functions.
751 * @wait: If true, wait (atomically) until function has
752 * completed on other CPUs.
753 *
754 * Preemption is disabled to protect against CPUs going offline but not online.
755 * CPUs going online during the call will not be seen or sent an IPI.
756 *
757 * You must not call this function with disabled interrupts or
758 * from a hardware interrupt handler or from a bottom half handler.
759 */
760void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
761 void *info, bool wait, const struct cpumask *mask)
762{
763 int cpu = get_cpu();
764
765 smp_call_function_many_cond(mask, func, info, wait, cond_func);
766 if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
767 unsigned long flags;
768
769 local_irq_save(flags);
770 func(info);
771 local_irq_restore(flags);
772 }
773 put_cpu();
774}
775EXPORT_SYMBOL(on_each_cpu_cond_mask);
776
777void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
778 void *info, bool wait)
779{
780 on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
781}
782EXPORT_SYMBOL(on_each_cpu_cond);
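
/*
 * Example (illustrative sketch; demo_dirty and the helpers are
 * hypothetical): the same "IPI only where needed" pattern as the older
 * on_each_cpu_cond() sketch earlier in this listing, against the current
 * four-argument form.  The condition callback runs with preemption
 * disabled.
 */
static DEFINE_PER_CPU(bool, demo_dirty);

static bool demo_is_dirty(int cpu, void *info)
{
	return per_cpu(demo_dirty, cpu);
}

static void demo_clean(void *info)
{
	__this_cpu_write(demo_dirty, false);
}

static void demo_clean_all_cpus(void)
{
	on_each_cpu_cond(demo_is_dirty, demo_clean, NULL, true);
}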
783
784static void do_nothing(void *unused)
785{
786}
787
788/**
789 * kick_all_cpus_sync - Force all cpus out of idle
790 *
791 * Used to synchronize the update of pm_idle function pointer. It's
792 * called after the pointer is updated and returns after the dummy
793 * callback function has been executed on all cpus. The execution of
794 * the function can only happen on the remote cpus after they have
795 * left the idle function which had been called via pm_idle function
796 * pointer. So it's guaranteed that nothing uses the previous pointer
797 * anymore.
798 */
799void kick_all_cpus_sync(void)
800{
801 /* Make sure the change is visible before we kick the cpus */
802 smp_mb();
803 smp_call_function(do_nothing, NULL, 1);
804}
805EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
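
/*
 * Example (illustrative sketch; demo_handler and demo_set_handler() are
 * hypothetical): the usage pattern described above, for replacing a
 * function pointer that remote CPUs may still be calling from their idle
 * path.
 */
static void (*demo_handler)(void);

static void demo_set_handler(void (*new_handler)(void))
{
	demo_handler = new_handler;

	/*
	 * Once this returns, every online CPU has run the dummy callback
	 * and has therefore left the idle code reached via the old pointer.
	 */
	kick_all_cpus_sync();
}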
806
807/**
808 * wake_up_all_idle_cpus - break all cpus out of idle
809 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
810 * including idle polling cpus; for non-idle cpus, we will do nothing
811 * for them.
812 */
813void wake_up_all_idle_cpus(void)
814{
815 int cpu;
816
817 preempt_disable();
818 for_each_online_cpu(cpu) {
819 if (cpu == smp_processor_id())
820 continue;
821
822 wake_up_if_idle(cpu);
823 }
824 preempt_enable();
825}
826EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
827
828/**
829 * smp_call_on_cpu - Call a function on a specific cpu
830 *
831 * Used to call a function on a specific cpu and wait for it to return.
832 * Optionally make sure the call is done on a specified physical cpu via vcpu
833 * pinning in order to support virtualized environments.
834 */
835struct smp_call_on_cpu_struct {
836 struct work_struct work;
837 struct completion done;
838 int (*func)(void *);
839 void *data;
840 int ret;
841 int cpu;
842};
843
844static void smp_call_on_cpu_callback(struct work_struct *work)
845{
846 struct smp_call_on_cpu_struct *sscs;
847
848 sscs = container_of(work, struct smp_call_on_cpu_struct, work);
849 if (sscs->cpu >= 0)
850 hypervisor_pin_vcpu(sscs->cpu);
851 sscs->ret = sscs->func(sscs->data);
852 if (sscs->cpu >= 0)
853 hypervisor_pin_vcpu(-1);
854
855 complete(&sscs->done);
856}
857
858int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
859{
860 struct smp_call_on_cpu_struct sscs = {
861 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
862 .func = func,
863 .data = par,
864 .cpu = phys ? cpu : -1,
865 };
866
867 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
868
869 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
870 return -ENXIO;
871
872 queue_work_on(cpu, system_wq, &sscs.work);
873 wait_for_completion(&sscs.done);
874
875 return sscs.ret;
876}
877EXPORT_SYMBOL_GPL(smp_call_on_cpu);
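
/*
 * Example (illustrative sketch; demo_slow_probe() and demo_probe_cpu()
 * are hypothetical, and msleep() assumes <linux/delay.h>): unlike the
 * IPI based helpers above, smp_call_on_cpu() runs the callback from a
 * kworker on the target CPU, so the callback may sleep.  phys = true
 * would additionally pin the vCPU while the callback runs under a
 * hypervisor.
 */
static int demo_slow_probe(void *data)
{
	/* process context on the target CPU: sleeping is allowed here */
	msleep(10);
	return smp_processor_id();
}

static int demo_probe_cpu(unsigned int cpu)
{
	/* returns demo_slow_probe()'s value, or -ENXIO if @cpu is offline */
	return smp_call_on_cpu(cpu, demo_slow_probe, NULL, false);
}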