// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64		val;
	struct {
		u64	src:16;
		u64	dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64	type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64	cnt:28;
	} u;
};

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64	ping;
	u64	pinged;
	u64	handle;
	u64	dequeue;
	u64	idle;
	u64	gotipi;
	u64	hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t	csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64	seq_queue;
	u64	seq_ipi;
	u64	seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);
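
/*
 * Example (illustrative, based on the parsing above): this debug
 * machinery is selected on the kernel command line.
 *
 *	csdlock_debug=1		// basic CSD lock wait debugging
 *	csdlock_debug=ext	// also enable the extended sequence tracing
 */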

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c) \
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

/*
 * Complain if too much time is spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}
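
/*
 * Illustrative sketch (not part of the file): the csd ownership
 * protocol implemented by csd_lock()/csd_unlock() as used by the
 * callers below. The sender takes the lock before publishing the csd;
 * whoever executes the callback releases it, after which ->func and
 * ->info may be reused:
 *
 *	csd_lock(csd);		// spins until any previous user is done
 *	csd->func = func;
 *	csd->info = info;
 *	__smp_call_single_queue(cpu, &csd->node.llist);
 *	...			// remote CPU runs func(info) ...
 *	csd_unlock(csd);	// ... and releases the csd
 */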

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition should be visible before the IPI is sent:
	 * the handler locks the list to pull the entry off it, and the
	 * normal cache coherency rules implied by spinlocks provide
	 * the required ordering.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* Special meaning of source cpu: 0 == queue empty */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that any pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
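
/*
 * Illustrative sketch (not part of the file; helper names are
 * hypothetical): a typical synchronous user. The callback runs in
 * hardirq context on the target CPU, so it must be fast, non-blocking
 * and must not sleep.
 */
static void remote_clock_func(void *info)
{
	/* Runs on the target CPU with interrupts disabled. */
	*(u64 *)info = sched_clock();
}

static __maybe_unused u64 remote_clock(int cpu)
{
	u64 clock = 0;

	/* wait=1: only return once the callback ran on @cpu. */
	smp_call_function_single(cpu, remote_clock_func, &clock, 1);
	return clock;
}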

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the csd has not yet been processed by a previous call to
 * smp_call_function_single_async(), this function returns -EBUSY
 * immediately: the csd object is still in use.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
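
/*
 * Illustrative sketch (hypothetical struct and helper names): a csd
 * embedded in a longer-lived, zero-initialized object (e.g. kzalloc()ed),
 * re-armed only after the previous request completed. -EBUSY from
 * smp_call_function_single_async() means the previous call is still
 * in flight.
 */
struct example_dev {
	call_single_data_t csd;
};

static void example_dev_ipi_func(void *info)
{
	/* Runs on the target CPU; @info is the example_dev. */
}

static __maybe_unused int example_dev_kick(struct example_dev *edev, int cpu)
{
	edev->csd.func = example_dev_ipi_func;
	edev->csd.info = edev;

	return smp_call_function_single_async(cpu, &edev->csd);
}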

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
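
/*
 * Illustrative sketch (hypothetical names): let the core pick the
 * cheapest online CPU out of a device's affinity mask, preferring the
 * current CPU, then the local node.
 */
static void example_poke_func(void *info)
{
	/* e.g. ring a doorbell that any CPU in the mask may ring */
}

static __maybe_unused int example_poke_near(const struct cpumask *affinity)
{
	return smp_call_function_any(affinity, example_poke_func, NULL, 1);
}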

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
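
/*
 * Illustrative sketch (hypothetical names): broadcast a fast callback
 * to every other online CPU and wait for completion, e.g. to drain
 * per-CPU state after a global mode change. The callback does not run
 * on the calling CPU.
 */
static void example_drain_func(void *info)
{
	/* Runs on each other online CPU, hardirq context. */
}

static __maybe_unused void example_drain_all(void)
{
	smp_call_function(example_drain_func, NULL, 1);
}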

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		set_nr_cpu_ids(nr_cpus);

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The mask of CPUs the function may run on.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
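
/*
 * Illustrative sketch (hypothetical per-CPU flag): IPI only the CPUs
 * that actually have something to do, instead of interrupting all of
 * them. The cond callback runs with preemption disabled on the
 * calling CPU.
 */
static DEFINE_PER_CPU(bool, example_dirty);

static void example_clean_func(void *info)
{
	this_cpu_write(example_dirty, false);
}

static bool example_is_dirty(int cpu, void *info)
{
	return per_cpu(example_dirty, cpu);
}

static __maybe_unused void example_clean_dirty_cpus(void)
{
	on_each_cpu_cond_mask(example_is_dirty, example_clean_func,
			      NULL, true, cpu_online_mask);
}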

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
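
/*
 * Illustrative sketch (hypothetical pointer name): publish a new
 * idle-path callback and make sure no CPU can still be executing the
 * old one before it is torn down.
 */
static void (*example_idle_fn)(void);

static __maybe_unused void example_set_idle_fn(void (*fn)(void))
{
	WRITE_ONCE(example_idle_fn, fn);
	/* smp_mb() + dummy IPI: every CPU has left the old callback. */
	kick_all_cpus_sync();
}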

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up every CPU that is in an idle state, including CPUs that are
 * idle-polling. Nothing is done for CPUs that are not idle.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
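
/*
 * Illustrative sketch (hypothetical names): unlike the IPI-based calls
 * above, smp_call_on_cpu() runs @func from a workqueue worker pinned
 * to the target CPU, so the callback may sleep.
 */
static int example_sleepy_func(void *data)
{
	might_sleep();	/* process context: sleeping is allowed here */
	return raw_smp_processor_id();
}

static __maybe_unused int example_run_sleepy(unsigned int cpu)
{
	/* phys=false: no hypervisor vCPU pinning requested. */
	return smp_call_on_cpu(cpu, example_sleepy_func, NULL, false);
}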