/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, before the IPI is
	 * sent; the normal cache coherency rules implied by spinlocks
	 * guarantee this.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}
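
/*
 * Illustrative sketch (not part of this file): an architecture's IPI
 * vector handler is expected to invoke the helper above with interrupts
 * disabled, roughly along these lines. The handler name is hypothetical;
 * see the arch code (e.g. arch/x86) for the real wiring.
 */
void example_call_function_single_ipi(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}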

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * should run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
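
/*
 * Usage sketch (illustrative, not part of this file): run a fast,
 * non-blocking callback on one CPU and wait for it to complete. The
 * function and variable names below are hypothetical.
 */
static void example_read_cpu_id(void *info)
{
	/* Runs on the target CPU, with interrupts disabled. */
	*(int *)info = smp_processor_id();
}

static int example_query_cpu(int target_cpu)
{
	int id = -1;
	int err;

	/* wait=1: &id on our stack stays valid until the callback ran. */
	err = smp_call_function_single(target_cpu, example_read_cpu_id,
				       &id, 1);
	return err ? err : id;
}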

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (i.e. embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
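
/*
 * Usage sketch (illustrative, hypothetical names, not part of this
 * file): embed the call_single_data in the object it works on (the
 * object must start out zeroed), and only rearm it after the previous
 * call has run, as the serialization rules above require.
 */
struct example_poker {
	struct call_single_data	csd;
	int			value;
};

static void example_poke_cb(void *info)
{
	struct example_poker *pkr = info;

	pr_debug("example: poked with %d\n", pkr->value);
}

static int example_poke(struct example_poker *pkr, int cpu, int value)
{
	pkr->value = value;
	pkr->csd.func = example_poke_cb;
	pkr->csd.info = pkr;

	/* Does not wait, so this is safe with interrupts disabled. */
	return smp_call_function_single_async(cpu, &pkr->csd);
}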

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
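
/*
 * Usage sketch (illustrative, hypothetical names): pick the cheapest
 * CPU out of a device's affinity mask, preferring the current CPU and
 * then the local node, and run a callback there.
 */
static void example_touch_hw(void *info)
{
	/* Fast, non-blocking work on whichever CPU was selected. */
}

static int example_touch_from_affinity(const struct cpumask *affinity)
{
	return smp_call_function_any(affinity, example_touch_hw, NULL, 1);
}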

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
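
/*
 * Usage sketch (illustrative, hypothetical names): broadcast a callback
 * to every other CPU in a mask. Note that smp_call_function_many()
 * never runs @func on the calling CPU; use on_each_cpu_mask() below if
 * the local CPU should be included.
 */
static void example_drain_local(void *info)
{
	/* Drain this CPU's private state, e.g. a per-cpu cache. */
}

static void example_drain_others(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_drain_local, NULL, true);
	preempt_enable();
}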

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
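
/*
 * Usage sketch (illustrative, hypothetical names): run a counting
 * callback everywhere, including the local CPU. With wait=1 the
 * on-stack atomic_t stays valid until every CPU has run it.
 */
static void example_count_cpu(void *info)
{
	atomic_inc(info);
}

static int example_count_online_cpus(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	on_each_cpu(example_count_cpu, &hits, 1);
	return atomic_read(&hits);	/* == num_online_cpus() */
}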

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
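
/*
 * Usage sketch (illustrative, hypothetical names): IPI only the CPUs
 * whose per-cpu state says there is something to flush, instead of
 * interrupting every online CPU.
 */
static DEFINE_PER_CPU(bool, example_dirty);

static bool example_needs_flush(int cpu, void *info)
{
	return per_cpu(example_dirty, cpu);
}

static void example_do_flush(void *info)
{
	this_cpu_write(example_dirty, false);
}

static void example_flush_dirty(void)
{
	on_each_cpu_cond(example_needs_flush, example_do_flush,
			 NULL, true, GFP_KERNEL);
}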

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break all cpus out of idle, including cpus that are
 * idle-polling; nothing is done for cpus that are not idle.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
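
/*
 * Usage sketch (illustrative, hypothetical names): unlike the IPI-based
 * helpers above, smp_call_on_cpu() runs @func from a workqueue on the
 * target CPU, so the callback may sleep; the call returns @func's own
 * return value.
 */
static int example_sleepy_probe(void *data)
{
	/* Process context on the chosen CPU: blocking is allowed. */
	return 0;
}

static int example_probe_on_cpu0(void)
{
	return smp_call_on_cpu(0, example_sleepy_probe, NULL, false);
}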