// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/string_choices.h>

#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/csd.h>
#undef CREATE_TRACE_POINTS

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

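/*
 * Per-sender state for multi-CPU calls (see smp_call_function_many_cond()):
 * @csd:	 one csd slot per target CPU, owned by the sending CPU
 * @cpumask:	 the CPUs selected for the current multi-CPU call
 * @cpumask_ipi: the subset of @cpumask that actually needs an IPI, i.e.
 *		 the CPUs whose call_single_queue was empty at llist_add()
 */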
struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

static __always_inline void
send_call_function_single_ipi(int cpu)
{
	if (call_function_single_prep_ipi(cpu)) {
		trace_ipi_send_cpu(cpu, _RET_IP_,
				   generic_smp_call_function_single_interrupt);
		arch_send_call_function_single_ipi(cpu);
	}
}

static __always_inline void
send_call_function_ipi_mask(struct cpumask *mask)
{
	trace_ipi_send_cpumask(mask, _RET_IP_,
			       generic_smp_call_function_single_interrupt);
	arch_send_call_function_ipi_mask(mask);
}

static __always_inline void
csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
{
	trace_csd_function_entry(func, csd);
	func(info);
	trace_csd_function_exit(func, csd);
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);

/*
 * Parse the csdlock_debug= kernel boot parameter.
 *
 * If you need to restore the old "ext" value that once provided
 * additional debugging information, reapply the following commits:
 *
 * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
 * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
 */
static int __init csdlock_debug(char *str)
{
	int ret;
	unsigned int val = 0;

	ret = get_option(&str, &val);
	if (ret) {
		if (val)
			static_branch_enable(&csdlock_debug_enabled);
		else
			static_branch_disable(&csdlock_debug_enabled);
	}

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);
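
/*
 * Example: booting with "csdlock_debug=1" on the kernel command line
 * enables the CSD-lock wait debugging below, "csdlock_debug=0" disables
 * it; the default comes from CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT.
 */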

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);
static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
module_param(panic_on_ipistall, int, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static atomic_t n_csd_lock_stuck;

/**
 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
 *
 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
 * long enough for a "non-responsive CSD lock" message to be printed.
 */
bool csd_lock_is_stuck(void)
{
	return !!atomic_read(&n_csd_lock_stuck);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		atomic_dec(&n_csd_lock_stuck);
		return true;
	}

	ts2 = ktime_get_mono_fast_ns();
	/* How long since we last checked for a stuck CSD lock. */
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
		   (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
		   csd_lock_timeout_ns == 0))
		return false;

	if (ts0 > ts2) {
		/* Our own sched_clock went backward; don't blame another CPU. */
		ts_delta = ts0 - ts2;
		pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
		*ts1 = ts2;
		return false;
	}

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	/* How long since this CSD lock was stuck. */
	ts_delta = ts2 - ts0;
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
		 cpu, csd->func, csd->info);
	(*nmessages)++;
	if (firsttime)
		atomic_inc(&n_csd_lock_stuck);
	/*
	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
	 * to become unstuck. Use a signed comparison to avoid triggering
	 * on underflows when the TSC is out of sync between sockets.
	 */
	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	if (firsttime)
		dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(call_single_data_t *csd)
{
	unsigned long nmessages = 0;
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = ktime_get_mono_fast_ns();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * We have to check the type of the CSD before queueing it, because
	 * once queued it can have its flags cleared by
	 *   flush_smp_call_function_queue()
	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
	 * executes migration_cpu_stop() on the remote CPU).
	 */
	if (trace_csd_queue_cpu_enabled()) {
		call_single_data_t *csd;
		smp_call_func_t func;

		csd = container_of(node, call_single_data_t, node.llist);
		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
			sched_ttwu_pending : csd->func;

		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
	}

	/*
	 * The list addition should be visible to the target CPU when it pops
	 * the head of the list to pull the entry off it in the IPI handler
	 * because of normal cache coherency rules implied by the underlying
	 * llist ops.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		csd_do_func(func, info, NULL);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;
	atomic_t *tbt;

	lockdep_assert_irqs_disabled();

	/* Allow waiters to send backtrace NMI from here onwards */
	tbt = this_cpu_ptr(&trigger_backtrace);
	atomic_set_release(tbt, 1);

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			csd_do_func(func, info, csd);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				csd_do_func(func, info, csd);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry) {
		csd = llist_entry(entry, typeof(*csd), node.llist);
		csd_do_func(sched_ttwu_pending, entry, csd);
	}
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock spinning
	 * in csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
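
/*
 * Example (illustrative sketch, not used in this file): synchronously
 * read the ID of CPU 2 from CPU 2 itself and wait for the result. The
 * callback runs with interrupts disabled on the target CPU, so it must
 * be fast and must not sleep; read_cpu_id() is a hypothetical name.
 *
 *	static void read_cpu_id(void *info)
 *	{
 *		*(int *)info = raw_smp_processor_id();
 *	}
 *
 *	int id, err;
 *
 *	err = smp_call_function_single(2, read_cpu_id, &id, 1);
 *	if (!err)
 *		pr_info("callback ran on CPU %d\n", id);
 */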

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If this function is called with a @csd that has not yet been processed
 * by a previous call to smp_call_function_single_async(), it returns
 * immediately with -EBUSY to indicate that the csd object is still in
 * progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
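
/*
 * Example (sketch): an object embedding its own call_single_data_t for
 * repeated asynchronous kicks of a remote CPU. INIT_CSD() from
 * <linux/smp.h> sets up ->func and ->info; the csd must not be reused
 * before the previous call has completed (-EBUSY is returned otherwise).
 * struct my_dev, my_dev_poke() and target_cpu are hypothetical.
 *
 *	struct my_dev {
 *		call_single_data_t csd;
 *	};
 *
 *	static void my_dev_poke(void *info)
 *	{
 *		struct my_dev *dev = info;
 *	}
 *
 *	INIT_CSD(&dev->csd, my_dev_poke, dev);
 *	err = smp_call_function_single_async(target_cpu, &dev->csd);
 */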

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
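
/*
 * Example (sketch): when any CPU of a NUMA node can service the request,
 * the selection order above avoids a cross-node IPI where possible.
 * drain_node_queue(), q and nid are hypothetical.
 *
 *	err = smp_call_function_any(cpumask_of_node(nid), drain_node_queue, q, 1);
 */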

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	int nr_cpus = 0;
	bool run_remote = false;
	bool run_local = false;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock spinning
	 * in csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

			if (cond_func && !cond_func(cpu, info)) {
				__cpumask_clear_cpu(cpu, cfd->cpumask);
				continue;
			}

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);

			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;
			}
		}

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			send_call_function_ipi_mask(cfd->cpumask_ipi);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		csd_do_func(func, info, NULL);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
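
/*
 * Example (sketch): run a flush on every other online CPU and wait for
 * all of them. Preemption must be disabled across the call, hence the
 * get_cpu()/put_cpu() pair; flush_local_state() and the per-CPU variable
 * my_state_gen are hypothetical.
 *
 *	static void flush_local_state(void *unused)
 *	{
 *		this_cpu_write(my_state_gen, 0);
 *	}
 *
 *	get_cpu();
 *	smp_call_function_many(cpu_online_mask, flush_local_state, NULL, true);
 *	put_cpu();
 */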

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak __init arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		set_nr_cpu_ids(nr_cpus);

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The set of cpus to run on (only runs on online subset).
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
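
/*
 * Example (sketch): only interrupt CPUs whose per-CPU generation counter
 * is stale, running the update locally as well if this CPU qualifies.
 * The per-CPU variable my_gen, cur_gen and update_gen() are hypothetical.
 *
 *	static bool gen_is_stale(int cpu, void *info)
 *	{
 *		return per_cpu(my_gen, cpu) != *(u64 *)info;
 *	}
 *
 *	on_each_cpu_cond_mask(gen_is_stale, update_gen, &cur_gen, true,
 *			      cpu_online_mask);
 */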

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up every idle CPU, including CPUs that are idle-polling; non-idle
 * CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);
	destroy_work_on_stack(&sscs.work);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
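
/*
 * Example (sketch): unlike the IPI-based helpers above, the callback runs
 * from a workqueue worker and may therefore sleep. Pass @phys=true when
 * the result depends on the physical CPU, e.g. when reading hardware
 * state under a hypervisor. read_board_config(), fill_config() and cfg
 * are hypothetical.
 *
 *	static int read_board_config(void *arg)
 *	{
 *		struct board_config *cfg = arg;
 *
 *		msleep(1);
 *		return fill_config(cfg);
 *	}
 *
 *	ret = smp_call_on_cpu(3, read_board_config, &cfg, false);
 */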