v4.17
  1/*
  2 * Generic helpers for smp ipi calls
  3 *
  4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/irq_work.h>
 10#include <linux/rcupdate.h>
 11#include <linux/rculist.h>
 12#include <linux/kernel.h>
 13#include <linux/export.h>
 14#include <linux/percpu.h>
 15#include <linux/init.h>
 16#include <linux/gfp.h>
 17#include <linux/smp.h>
 18#include <linux/cpu.h>
 19#include <linux/sched.h>
 20#include <linux/sched/idle.h>
 21#include <linux/hypervisor.h>
 22
 23#include "smpboot.h"
 24
 25enum {
 26	CSD_FLAG_LOCK		= 0x01,
 27	CSD_FLAG_SYNCHRONOUS	= 0x02,
 28};
 29
 30struct call_function_data {
 31	call_single_data_t	__percpu *csd;
 32	cpumask_var_t		cpumask;
 33	cpumask_var_t		cpumask_ipi;
 34};
 35
 36static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 37
 38static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 39
 40static void flush_smp_call_function_queue(bool warn_cpu_offline);
 41
 42int smpcfd_prepare_cpu(unsigned int cpu)
 43{
 44	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 45
 46	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 47				     cpu_to_node(cpu)))
 48		return -ENOMEM;
 49	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 50				     cpu_to_node(cpu))) {
 51		free_cpumask_var(cfd->cpumask);
 52		return -ENOMEM;
 53	}
 54	cfd->csd = alloc_percpu(call_single_data_t);
 55	if (!cfd->csd) {
 56		free_cpumask_var(cfd->cpumask);
 57		free_cpumask_var(cfd->cpumask_ipi);
 58		return -ENOMEM;
 59	}
 60
 61	return 0;
 62}
 63
 64int smpcfd_dead_cpu(unsigned int cpu)
 65{
 66	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 67
 68	free_cpumask_var(cfd->cpumask);
 69	free_cpumask_var(cfd->cpumask_ipi);
 70	free_percpu(cfd->csd);
 71	return 0;
 72}
 73
 74int smpcfd_dying_cpu(unsigned int cpu)
 75{
 76	/*
 77	 * The IPIs for the smp-call-function callbacks queued by other
 78	 * CPUs might arrive late, either due to hardware latencies or
 79	 * because this CPU disabled interrupts (inside stop-machine)
 80	 * before the IPIs were sent. So flush out any pending callbacks
 81	 * explicitly (without waiting for the IPIs to arrive), to
 82	 * ensure that the outgoing CPU doesn't go offline with work
 83	 * still pending.
 84	 */
 85	flush_smp_call_function_queue(false);
 86	return 0;
 87}
 88
 89void __init call_function_init(void)
 90{
 91	int i;
 92
 93	for_each_possible_cpu(i)
 94		init_llist_head(&per_cpu(call_single_queue, i));
 95
 96	smpcfd_prepare_cpu(smp_processor_id());
 97}
 98
 99/*
100 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
101 *
102 * For non-synchronous ipi calls the csd can still be in use by the
103 * previous function call. For multi-cpu calls it's even more interesting
104 * as we'll have to ensure no other cpu is observing our csd.
105 */
106static __always_inline void csd_lock_wait(call_single_data_t *csd)
107{
108	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
109}
110
111static __always_inline void csd_lock(call_single_data_t *csd)
112{
113	csd_lock_wait(csd);
114	csd->flags |= CSD_FLAG_LOCK;
115
116	/*
117	 * prevent CPU from reordering the above assignment
118	 * to ->flags with any subsequent assignments to other
119	 * fields of the specified call_single_data_t structure:
120	 */
121	smp_wmb();
122}
123
124static __always_inline void csd_unlock(call_single_data_t *csd)
125{
126	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
127
128	/*
129	 * ensure we're all done before releasing data:
130	 */
131	smp_store_release(&csd->flags, 0);
132}
133
134static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
135
136/*
137 * Insert a previously allocated call_single_data_t element
138 * for execution on the given CPU. data must already have
139 * ->func, ->info, and ->flags set.
140 */
141static int generic_exec_single(int cpu, call_single_data_t *csd,
142			       smp_call_func_t func, void *info)
143{
144	if (cpu == smp_processor_id()) {
145		unsigned long flags;
146
147		/*
148		 * We can unlock early even for the synchronous on-stack case,
149		 * since we're doing this from the same CPU..
150		 */
151		csd_unlock(csd);
152		local_irq_save(flags);
153		func(info);
154		local_irq_restore(flags);
155		return 0;
156	}
157
158
159	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
160		csd_unlock(csd);
161		return -ENXIO;
162	}
163
164	csd->func = func;
165	csd->info = info;
166
167	/*
168	 * The list addition should be visible before sending the IPI
169	 * handler locks the list to pull the entry off it because of
170	 * normal cache coherency rules implied by spinlocks.
171	 *
172	 * If IPIs can go out of order to the cache coherency protocol
173	 * in an architecture, sufficient synchronisation should be added
174	 * to arch code to make it appear to obey cache coherency WRT
175	 * locking and barrier primitives. Generic code isn't really
176	 * equipped to do the right thing...
177	 */
178	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
179		arch_send_call_function_single_ipi(cpu);
180
181	return 0;
182}
183
184/**
185 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
186 *
187 * Invoked by arch to handle an IPI for call function single.
188 * Must be called with interrupts disabled.
189 */
190void generic_smp_call_function_single_interrupt(void)
191{
192	flush_smp_call_function_queue(true);
193}
194
195/**
196 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
197 *
198 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
199 *		      offline CPU. Skip this check if set to 'false'.
200 *
201 * Flush any pending smp-call-function callbacks queued on this CPU. This is
202 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
203 * to ensure that all pending IPI callbacks are run before it goes completely
204 * offline.
205 *
206 * Loop through the call_single_queue and run all the queued callbacks.
207 * Must be called with interrupts disabled.
208 */
209static void flush_smp_call_function_queue(bool warn_cpu_offline)
210{
211	struct llist_head *head;
212	struct llist_node *entry;
213	call_single_data_t *csd, *csd_next;
214	static bool warned;
215
216	lockdep_assert_irqs_disabled();
217
218	head = this_cpu_ptr(&call_single_queue);
219	entry = llist_del_all(head);
220	entry = llist_reverse_order(entry);
221
222	/* There shouldn't be any pending callbacks on an offline CPU. */
223	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
224		     !warned && !llist_empty(head))) {
225		warned = true;
226		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
227
228		/*
229		 * We don't have to use the _safe() variant here
230		 * because we are not invoking the IPI handlers yet.
231		 */
232		llist_for_each_entry(csd, entry, llist)
233			pr_warn("IPI callback %pS sent to offline CPU\n",
234				csd->func);
235	}
236
237	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
238		smp_call_func_t func = csd->func;
239		void *info = csd->info;
240
241		/* Do we wait until *after* callback? */
242		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
243			func(info);
244			csd_unlock(csd);
245		} else {
246			csd_unlock(csd);
247			func(info);
248		}
249	}
250
251	/*
252	 * Handle irq works queued remotely by irq_work_queue_on().
253	 * Smp functions above are typically synchronous so they
254	 * better run first since some other CPUs may be busy waiting
255	 * for them.
256	 */
257	irq_work_run();
258}
259
260/*
261 * smp_call_function_single - Run a function on a specific CPU
262 * @func: The function to run. This must be fast and non-blocking.
263 * @info: An arbitrary pointer to pass to the function.
264 * @wait: If true, wait until function has completed on other CPUs.
265 *
266 * Returns 0 on success, else a negative status code.
267 */
268int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
269			     int wait)
270{
271	call_single_data_t *csd;
272	call_single_data_t csd_stack = {
273		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
274	};
275	int this_cpu;
276	int err;
277
278	/*
279	 * prevent preemption and reschedule on another processor,
280	 * as well as CPU removal
281	 */
282	this_cpu = get_cpu();
283
284	/*
285	 * Can deadlock when called with interrupts disabled.
286	 * We allow CPUs that are not yet online though, as no one else can
287	 * send smp call function interrupt to this cpu and as such deadlocks
288	 * can't happen.
289	 */
290	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
291		     && !oops_in_progress);
292
293	csd = &csd_stack;
294	if (!wait) {
295		csd = this_cpu_ptr(&csd_data);
296		csd_lock(csd);
297	}
298
299	err = generic_exec_single(cpu, csd, func, info);
300
301	if (wait)
302		csd_lock_wait(csd);
303
304	put_cpu();
305
306	return err;
307}
308EXPORT_SYMBOL(smp_call_function_single);
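/*
 * Usage sketch for smp_call_function_single() (not part of this file):
 * run a callback on one specific CPU and wait for it. A minimal sketch
 * assuming a kernel-module context; read_cpu_id() and demo_value are
 * illustrative names, not kernel APIs.
 */
#include <linux/smp.h>

static int demo_value;

static void read_cpu_id(void *info)
{
	/* Runs on the target CPU, with interrupts disabled. */
	*(int *)info = smp_processor_id();
}

static int demo_single(void)
{
	/* wait=1: only return after read_cpu_id() ran on CPU 1. */
	return smp_call_function_single(1, read_cpu_id, &demo_value, 1);
}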
309
310/**
311 * smp_call_function_single_async(): Run an asynchronous function on a
312 * 			         specific CPU.
313 * @cpu: The CPU to run on.
314 * @csd: Pre-allocated and setup data structure
315 *
316 * Like smp_call_function_single(), but the call is asynchronous and
317 * can thus be done from contexts with disabled interrupts.
318 *
319 * The caller passes his own pre-allocated data structure
320 * (ie: embedded in an object) and is responsible for synchronizing it
321 * such that the IPIs performed on the @csd are strictly serialized.
322 *
323 * NOTE: Be careful, there is unfortunately no current debugging facility to
324 * validate the correctness of this serialization.
325 */
326int smp_call_function_single_async(int cpu, call_single_data_t *csd)
327{
328	int err = 0;
329
330	preempt_disable();
331
332	/* We could deadlock if we have to wait here with interrupts disabled! */
333	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
334		csd_lock_wait(csd);
335
336	csd->flags = CSD_FLAG_LOCK;
337	smp_wmb();
338
339	err = generic_exec_single(cpu, csd, csd->func, csd->info);
340	preempt_enable();
341
342	return err;
343}
344EXPORT_SYMBOL_GPL(smp_call_function_single_async);
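/*
 * Usage sketch for smp_call_function_single_async() (not part of this
 * file): the caller embeds the csd in its own object, sets ->func/->info
 * up front, and must not reuse the csd until the previous call completed
 * (CSD_FLAG_LOCK clear). Illustrative names; assumes module context.
 */
struct demo_async {
	call_single_data_t csd;
	int payload;
};

static void demo_async_fn(void *info)
{
	struct demo_async *d = info;

	pr_info("payload %d ran on CPU %d\n", d->payload, smp_processor_id());
}

static void demo_fire(struct demo_async *d, int cpu)
{
	d->csd.func = demo_async_fn;
	d->csd.info = d;
	/* Does not wait; legal even with interrupts disabled. */
	smp_call_function_single_async(cpu, &d->csd);
}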
345
346/*
347 * smp_call_function_any - Run a function on any of the given cpus
348 * @mask: The mask of cpus it can run on.
349 * @func: The function to run. This must be fast and non-blocking.
350 * @info: An arbitrary pointer to pass to the function.
351 * @wait: If true, wait until function has completed.
352 *
353 * Returns 0 on success, else a negative status code (if no cpus were online).
354 *
355 * Selection preference:
356 *	1) current cpu if in @mask
357 *	2) any cpu of current node if in @mask
358 *	3) any other online cpu in @mask
359 */
360int smp_call_function_any(const struct cpumask *mask,
361			  smp_call_func_t func, void *info, int wait)
362{
363	unsigned int cpu;
364	const struct cpumask *nodemask;
365	int ret;
366
367	/* Try for same CPU (cheapest) */
368	cpu = get_cpu();
369	if (cpumask_test_cpu(cpu, mask))
370		goto call;
371
372	/* Try for same node. */
373	nodemask = cpumask_of_node(cpu_to_node(cpu));
374	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
375	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
376		if (cpu_online(cpu))
377			goto call;
378	}
379
380	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
381	cpu = cpumask_any_and(mask, cpu_online_mask);
382call:
383	ret = smp_call_function_single(cpu, func, info, wait);
384	put_cpu();
385	return ret;
386}
387EXPORT_SYMBOL_GPL(smp_call_function_any);
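/*
 * Usage sketch for smp_call_function_any() (not part of this file): let
 * the kernel pick the cheapest CPU out of a candidate set, in the
 * preference order documented above. demo_poke() is illustrative.
 */
static void demo_poke(void *info)
{
	/* fast, non-blocking */
}

static int demo_any(const struct cpumask *candidates)
{
	return smp_call_function_any(candidates, demo_poke, NULL, 1);
}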
388
389/**
390 * smp_call_function_many(): Run a function on a set of other CPUs.
391 * @mask: The set of cpus to run on (only runs on online subset).
392 * @func: The function to run. This must be fast and non-blocking.
393 * @info: An arbitrary pointer to pass to the function.
394 * @wait: If true, wait (atomically) until function has completed
395 *        on other CPUs.
396 *
397 * If @wait is true, then returns once @func has returned.
398 *
399 * You must not call this function with disabled interrupts or from a
400 * hardware interrupt handler or from a bottom half handler. Preemption
401 * must be disabled when calling this function.
402 */
403void smp_call_function_many(const struct cpumask *mask,
404			    smp_call_func_t func, void *info, bool wait)
405{
406	struct call_function_data *cfd;
407	int cpu, next_cpu, this_cpu = smp_processor_id();
408
409	/*
410	 * Can deadlock when called with interrupts disabled.
411	 * We allow CPUs that are not yet online though, as no one else can
412	 * send smp call function interrupt to this cpu and as such deadlocks
413	 * can't happen.
414	 */
415	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
416		     && !oops_in_progress && !early_boot_irqs_disabled);
417
418	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
419	cpu = cpumask_first_and(mask, cpu_online_mask);
420	if (cpu == this_cpu)
421		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
422
423	/* No online cpus?  We're done. */
424	if (cpu >= nr_cpu_ids)
425		return;
426
427	/* Do we have another CPU which isn't us? */
428	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
429	if (next_cpu == this_cpu)
430		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
431
432	/* Fastpath: do that cpu by itself. */
433	if (next_cpu >= nr_cpu_ids) {
434		smp_call_function_single(cpu, func, info, wait);
435		return;
436	}
437
438	cfd = this_cpu_ptr(&cfd_data);
439
440	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
441	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
442
443	/* Some callers race with other cpus changing the passed mask */
444	if (unlikely(!cpumask_weight(cfd->cpumask)))
445		return;
446
447	cpumask_clear(cfd->cpumask_ipi);
448	for_each_cpu(cpu, cfd->cpumask) {
449		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
450
451		csd_lock(csd);
452		if (wait)
453			csd->flags |= CSD_FLAG_SYNCHRONOUS;
454		csd->func = func;
455		csd->info = info;
456		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
457			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
458	}
459
460	/* Send a message to all CPUs in the map */
461	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
462
463	if (wait) {
464		for_each_cpu(cpu, cfd->cpumask) {
465			call_single_data_t *csd;
466
467			csd = per_cpu_ptr(cfd->csd, cpu);
468			csd_lock_wait(csd);
469		}
470	}
471}
472EXPORT_SYMBOL(smp_call_function_many);
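/*
 * Usage sketch for smp_call_function_many() (not part of this file):
 * broadcast to all other online CPUs. The caller must disable preemption,
 * and the calling CPU is always skipped, so run the function locally by
 * hand when it is needed there too. Illustrative names.
 */
static void demo_drain(void *info)
{
	/* e.g. drain a per-CPU cache; must be fast and non-blocking */
}

static void demo_many(void)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, demo_drain, NULL, true);
	demo_drain(NULL);	/* the mask walk above skipped this CPU */
	preempt_enable();
}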
473
474/**
475 * smp_call_function(): Run a function on all other CPUs.
476 * @func: The function to run. This must be fast and non-blocking.
477 * @info: An arbitrary pointer to pass to the function.
478 * @wait: If true, wait (atomically) until function has completed
479 *        on other CPUs.
480 *
481 * Returns 0.
482 *
483 * If @wait is true, then returns once @func has returned; otherwise
484 * it returns just before the target cpu calls @func.
485 *
486 * You must not call this function with disabled interrupts or from a
487 * hardware interrupt handler or from a bottom half handler.
488 */
489int smp_call_function(smp_call_func_t func, void *info, int wait)
490{
491	preempt_disable();
492	smp_call_function_many(cpu_online_mask, func, info, wait);
493	preempt_enable();
494
495	return 0;
496}
497EXPORT_SYMBOL(smp_call_function);
498
499/* Setup configured maximum number of CPUs to activate */
500unsigned int setup_max_cpus = NR_CPUS;
501EXPORT_SYMBOL(setup_max_cpus);
502
503
504/*
505 * Setup routine for controlling SMP activation
506 *
507 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
508 * activation entirely (the MPS table probe still happens, though).
509 *
510 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
511 * greater than 0, limits the maximum number of CPUs activated in
512 * SMP mode to <NUM>.
513 */
514
515void __weak arch_disable_smp_support(void) { }
516
517static int __init nosmp(char *str)
518{
519	setup_max_cpus = 0;
520	arch_disable_smp_support();
521
522	return 0;
523}
524
525early_param("nosmp", nosmp);
526
527/* this is hard limit */
528static int __init nrcpus(char *str)
529{
530	int nr_cpus;
531
532	get_option(&str, &nr_cpus);
533	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
534		nr_cpu_ids = nr_cpus;
535
536	return 0;
537}
538
539early_param("nr_cpus", nrcpus);
540
541static int __init maxcpus(char *str)
542{
543	get_option(&str, &setup_max_cpus);
544	if (setup_max_cpus == 0)
545		arch_disable_smp_support();
546
547	return 0;
548}
549
550early_param("maxcpus", maxcpus);
551
552/* Setup number of possible processor ids */
553unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
554EXPORT_SYMBOL(nr_cpu_ids);
555
556/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
557void __init setup_nr_cpu_ids(void)
558{
559	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
560}
561
562/* Called by boot processor to activate the rest. */
563void __init smp_init(void)
564{
565	int num_nodes, num_cpus;
566	unsigned int cpu;
567
568	idle_threads_init();
569	cpuhp_threads_init();
570
571	pr_info("Bringing up secondary CPUs ...\n");
572
573	/* FIXME: This should be done in userspace --RR */
574	for_each_present_cpu(cpu) {
575		if (num_online_cpus() >= setup_max_cpus)
576			break;
577		if (!cpu_online(cpu))
578			cpu_up(cpu);
579	}
580
581	num_nodes = num_online_nodes();
582	num_cpus  = num_online_cpus();
583	pr_info("Brought up %d node%s, %d CPU%s\n",
584		num_nodes, (num_nodes > 1 ? "s" : ""),
585		num_cpus,  (num_cpus  > 1 ? "s" : ""));
586
587	/* Any cleanup work */
588	smp_cpus_done(setup_max_cpus);
589}
590
591/*
592 * Call a function on all processors.  May be used during early boot while
593 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
594 * of local_irq_disable/enable().
595 */
596int on_each_cpu(void (*func) (void *info), void *info, int wait)
597{
598	unsigned long flags;
599	int ret = 0;
600
601	preempt_disable();
602	ret = smp_call_function(func, info, wait);
603	local_irq_save(flags);
604	func(info);
605	local_irq_restore(flags);
606	preempt_enable();
607	return ret;
608}
609EXPORT_SYMBOL(on_each_cpu);
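/*
 * Usage sketch for on_each_cpu() (not part of this file): reset a
 * hypothetical per-CPU counter on every CPU, including this one, and
 * wait until all callbacks have finished.
 */
static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_reset(void *info)
{
	this_cpu_write(demo_count, 0);
}

static void demo_reset_all(void)
{
	on_each_cpu(demo_reset, NULL, 1);	/* wait=1 */
}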
610
611/**
612 * on_each_cpu_mask(): Run a function on processors specified by
613 * cpumask, which may include the local processor.
614 * @mask: The set of cpus to run on (only runs on online subset).
615 * @func: The function to run. This must be fast and non-blocking.
616 * @info: An arbitrary pointer to pass to the function.
617 * @wait: If true, wait (atomically) until function has completed
618 *        on other CPUs.
619 *
620 * If @wait is true, then returns once @func has returned.
621 *
622 * You must not call this function with disabled interrupts or from a
623 * hardware interrupt handler or from a bottom half handler.  The
624 * exception is that it may be used during early boot while
625 * early_boot_irqs_disabled is set.
626 */
627void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
628			void *info, bool wait)
629{
630	int cpu = get_cpu();
631
632	smp_call_function_many(mask, func, info, wait);
633	if (cpumask_test_cpu(cpu, mask)) {
634		unsigned long flags;
635		local_irq_save(flags);
636		func(info);
637		local_irq_restore(flags);
638	}
639	put_cpu();
640}
641EXPORT_SYMBOL(on_each_cpu_mask);
642
643/*
644 * on_each_cpu_cond(): Call a function on each processor for which
645 * the supplied function cond_func returns true, optionally waiting
646 * for all the required CPUs to finish. This may include the local
647 * processor.
648 * @cond_func:	A callback function that is passed a cpu id and
649 *		the info parameter. The function is called
650 *		with preemption disabled. The function should
651 *		return a boolean value indicating whether to IPI
652 *		the specified CPU.
653 * @func:	The function to run on all applicable CPUs.
654 *		This must be fast and non-blocking.
655 * @info:	An arbitrary pointer to pass to both functions.
656 * @wait:	If true, wait (atomically) until function has
657 *		completed on other CPUs.
658 * @gfp_flags:	GFP flags to use when allocating the cpumask
659 *		used internally by the function.
660 *
661 * The function might sleep if the GFP flags indicate that a non-atomic
662 * allocation is allowed.
663 *
664 * Preemption is disabled to protect against CPUs going offline but not online.
665 * CPUs going online during the call will not be seen or sent an IPI.
666 *
667 * You must not call this function with disabled interrupts or
668 * from a hardware interrupt handler or from a bottom half handler.
669 */
670void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
671			smp_call_func_t func, void *info, bool wait,
672			gfp_t gfp_flags)
673{
674	cpumask_var_t cpus;
675	int cpu, ret;
676
677	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
678
679	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
680		preempt_disable();
681		for_each_online_cpu(cpu)
682			if (cond_func(cpu, info))
683				cpumask_set_cpu(cpu, cpus);
684		on_each_cpu_mask(cpus, func, info, wait);
685		preempt_enable();
686		free_cpumask_var(cpus);
687	} else {
688		/*
689		 * No free cpumask, bother. No matter, we'll
690		 * just have to IPI them one by one.
691		 */
692		preempt_disable();
693		for_each_online_cpu(cpu)
694			if (cond_func(cpu, info)) {
695				ret = smp_call_function_single(cpu, func,
696								info, wait);
697				WARN_ON_ONCE(ret);
698			}
699		preempt_enable();
700	}
701}
702EXPORT_SYMBOL(on_each_cpu_cond);
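/*
 * Usage sketch for on_each_cpu_cond() (not part of this file), reusing
 * the hypothetical demo_count/demo_reset from the previous sketch: only
 * CPUs whose counter is non-zero get an IPI. cond_func runs here with
 * preemption disabled; func runs on each selected CPU.
 */
static bool demo_need_ipi(int cpu, void *info)
{
	return per_cpu(demo_count, cpu) != 0;
}

static void demo_cond_reset(void)
{
	on_each_cpu_cond(demo_need_ipi, demo_reset, NULL, true, GFP_KERNEL);
}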
703
704static void do_nothing(void *unused)
705{
706}
707
708/**
709 * kick_all_cpus_sync - Force all cpus out of idle
710 *
711 * Used to synchronize the update of pm_idle function pointer. It's
712 * called after the pointer is updated and returns after the dummy
713 * callback function has been executed on all cpus. The execution of
714 * the function can only happen on the remote cpus after they have
715 * left the idle function which had been called via pm_idle function
716 * pointer. So it's guaranteed that nothing uses the previous pointer
717 * anymore.
718 */
719void kick_all_cpus_sync(void)
720{
721	/* Make sure the change is visible before we kick the cpus */
722	smp_mb();
723	smp_call_function(do_nothing, NULL, 1);
724}
725EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
726
727/**
728 * wake_up_all_idle_cpus - break all cpus out of idle
729 * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
730 * including CPUs that are idle-polling; CPUs that are not idle are left
731 * alone.
732 */
733void wake_up_all_idle_cpus(void)
734{
735	int cpu;
736
737	preempt_disable();
738	for_each_online_cpu(cpu) {
739		if (cpu == smp_processor_id())
740			continue;
741
742		wake_up_if_idle(cpu);
743	}
744	preempt_enable();
745}
746EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
747
748/**
749 * smp_call_on_cpu - Call a function on a specific cpu
750 *
751 * Used to call a function on a specific cpu and wait for it to return.
752 * Optionally make sure the call is done on a specified physical cpu via vcpu
753 * pinning in order to support virtualized environments.
754 */
755struct smp_call_on_cpu_struct {
756	struct work_struct	work;
757	struct completion	done;
758	int			(*func)(void *);
759	void			*data;
760	int			ret;
761	int			cpu;
762};
763
764static void smp_call_on_cpu_callback(struct work_struct *work)
765{
766	struct smp_call_on_cpu_struct *sscs;
767
768	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
769	if (sscs->cpu >= 0)
770		hypervisor_pin_vcpu(sscs->cpu);
771	sscs->ret = sscs->func(sscs->data);
772	if (sscs->cpu >= 0)
773		hypervisor_pin_vcpu(-1);
774
775	complete(&sscs->done);
776}
777
778int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
779{
780	struct smp_call_on_cpu_struct sscs = {
781		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
782		.func = func,
783		.data = par,
784		.cpu  = phys ? cpu : -1,
785	};
786
787	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
788
789	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
790		return -ENXIO;
791
792	queue_work_on(cpu, system_wq, &sscs.work);
793	wait_for_completion(&sscs.done);
794
795	return sscs.ret;
796}
797EXPORT_SYMBOL_GPL(smp_call_on_cpu);
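/*
 * Usage sketch for smp_call_on_cpu() (not part of this file): unlike the
 * IPI-based calls above, @func runs from a workqueue on the target CPU
 * and may therefore sleep; phys=true additionally pins the vCPU on
 * hypervisors. demo_sleepy() is an illustrative name.
 */
#include <linux/delay.h>

static int demo_sleepy(void *arg)
{
	msleep(10);		/* sleeping is fine in this context */
	return 0;
}

static int demo_on_cpu0(void)
{
	return smp_call_on_cpu(0, demo_sleepy, NULL, false);
}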
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Generic helpers for smp ipi calls
   4 *
   5 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/irq_work.h>
  11#include <linux/rcupdate.h>
  12#include <linux/rculist.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/percpu.h>
  16#include <linux/init.h>
  17#include <linux/interrupt.h>
  18#include <linux/gfp.h>
  19#include <linux/smp.h>
  20#include <linux/cpu.h>
  21#include <linux/sched.h>
  22#include <linux/sched/idle.h>
  23#include <linux/hypervisor.h>
  24#include <linux/sched/clock.h>
  25#include <linux/nmi.h>
  26#include <linux/sched/debug.h>
  27#include <linux/jump_label.h>
  28#include <linux/string_choices.h>
  29
  30#include <trace/events/ipi.h>
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/csd.h>
  33#undef CREATE_TRACE_POINTS
  34
  35#include "smpboot.h"
  36#include "sched/smp.h"
  37
  38#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
  39
  40struct call_function_data {
  41	call_single_data_t	__percpu *csd;
  42	cpumask_var_t		cpumask;
  43	cpumask_var_t		cpumask_ipi;
  44};
  45
  46static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
  47
  48static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
  49
  50static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
  51
  52static void __flush_smp_call_function_queue(bool warn_cpu_offline);
  53
  54int smpcfd_prepare_cpu(unsigned int cpu)
  55{
  56	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
  57
  58	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
  59				     cpu_to_node(cpu)))
  60		return -ENOMEM;
  61	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
  62				     cpu_to_node(cpu))) {
  63		free_cpumask_var(cfd->cpumask);
  64		return -ENOMEM;
  65	}
  66	cfd->csd = alloc_percpu(call_single_data_t);
  67	if (!cfd->csd) {
  68		free_cpumask_var(cfd->cpumask);
  69		free_cpumask_var(cfd->cpumask_ipi);
  70		return -ENOMEM;
  71	}
  72
  73	return 0;
  74}
  75
  76int smpcfd_dead_cpu(unsigned int cpu)
  77{
  78	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
  79
  80	free_cpumask_var(cfd->cpumask);
  81	free_cpumask_var(cfd->cpumask_ipi);
  82	free_percpu(cfd->csd);
  83	return 0;
  84}
  85
  86int smpcfd_dying_cpu(unsigned int cpu)
  87{
  88	/*
  89	 * The IPIs for the smp-call-function callbacks queued by other
  90	 * CPUs might arrive late, either due to hardware latencies or
  91	 * because this CPU disabled interrupts (inside stop-machine)
  92	 * before the IPIs were sent. So flush out any pending callbacks
  93	 * explicitly (without waiting for the IPIs to arrive), to
  94	 * ensure that the outgoing CPU doesn't go offline with work
  95	 * still pending.
  96	 */
  97	__flush_smp_call_function_queue(false);
  98	irq_work_run();
  99	return 0;
 100}
 101
 102void __init call_function_init(void)
 103{
 104	int i;
 105
 106	for_each_possible_cpu(i)
 107		init_llist_head(&per_cpu(call_single_queue, i));
 108
 109	smpcfd_prepare_cpu(smp_processor_id());
 110}
 111
 112static __always_inline void
 113send_call_function_single_ipi(int cpu)
 114{
 115	if (call_function_single_prep_ipi(cpu)) {
 116		trace_ipi_send_cpu(cpu, _RET_IP_,
 117				   generic_smp_call_function_single_interrupt);
 118		arch_send_call_function_single_ipi(cpu);
 119	}
 120}
 121
 122static __always_inline void
 123send_call_function_ipi_mask(struct cpumask *mask)
 124{
 125	trace_ipi_send_cpumask(mask, _RET_IP_,
 126			       generic_smp_call_function_single_interrupt);
 127	arch_send_call_function_ipi_mask(mask);
 128}
 129
 130static __always_inline void
 131csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 132{
 133	trace_csd_function_entry(func, csd);
 134	func(info);
 135	trace_csd_function_exit(func, csd);
 136}
 137
 138#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 139
 140static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
 141
 142/*
 143 * Parse the csdlock_debug= kernel boot parameter.
 144 *
 145 * If you need to restore the old "ext" value that once provided
 146 * additional debugging information, reapply the following commits:
 147 *
 148 * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
 149 * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
 150 */
 151static int __init csdlock_debug(char *str)
 152{
 153	int ret;
 154	unsigned int val = 0;
 155
 156	ret = get_option(&str, &val);
 157	if (ret) {
 158		if (val)
 159			static_branch_enable(&csdlock_debug_enabled);
 160		else
 161			static_branch_disable(&csdlock_debug_enabled);
 162	}
 163
 164	return 1;
 165}
 166__setup("csdlock_debug=", csdlock_debug);
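/*
 * The parameter above is consumed at early boot. A sketch of a kernel
 * command line that enables the debugging (illustrative values; since
 * kernel/smp.c is built in, its module parameters take the "smp."
 * prefix):
 *
 *	csdlock_debug=1 smp.csd_lock_timeout=10000 smp.panic_on_ipistall=300000
 */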
 167
 168static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 169static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 170static DEFINE_PER_CPU(void *, cur_csd_info);
 171
 172static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
 173module_param(csd_lock_timeout, ulong, 0444);
 174static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
 175module_param(panic_on_ipistall, int, 0444);
 176
 177static atomic_t csd_bug_count = ATOMIC_INIT(0);
 178
 179/* Record current CSD work for current CPU, NULL to erase. */
 180static void __csd_lock_record(call_single_data_t *csd)
 181{
 182	if (!csd) {
 183		smp_mb(); /* NULL cur_csd after unlock. */
 184		__this_cpu_write(cur_csd, NULL);
 185		return;
 186	}
 187	__this_cpu_write(cur_csd_func, csd->func);
 188	__this_cpu_write(cur_csd_info, csd->info);
 189	smp_wmb(); /* func and info before csd. */
 190	__this_cpu_write(cur_csd, csd);
 191	smp_mb(); /* Update cur_csd before function call. */
 192		  /* Or before unlock, as the case may be. */
 193}
 194
 195static __always_inline void csd_lock_record(call_single_data_t *csd)
 196{
 197	if (static_branch_unlikely(&csdlock_debug_enabled))
 198		__csd_lock_record(csd);
 199}
 200
 201static int csd_lock_wait_getcpu(call_single_data_t *csd)
 202{
 203	unsigned int csd_type;
 204
 205	csd_type = CSD_TYPE(csd);
 206	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
 207		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
 208	return -1;
 209}
 210
 211static atomic_t n_csd_lock_stuck;
 212
 213/**
 214 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
 215 *
 216 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
 217 * long enough for a "non-responsive CSD lock" message to be printed.
 218 */
 219bool csd_lock_is_stuck(void)
 220{
 221	return !!atomic_read(&n_csd_lock_stuck);
 222}
 223
 224/*
 225 * Complain if too much time spent waiting.  Note that only
 226 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 227 * so waiting on other types gets much less information.
 228 */
 229static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
 230{
 231	int cpu = -1;
 232	int cpux;
 233	bool firsttime;
 234	u64 ts2, ts_delta;
 235	call_single_data_t *cpu_cur_csd;
 236	unsigned int flags = READ_ONCE(csd->node.u_flags);
 237	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;
 238
 239	if (!(flags & CSD_FLAG_LOCK)) {
 240		if (!unlikely(*bug_id))
 241			return true;
 242		cpu = csd_lock_wait_getcpu(csd);
 243		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
 244			 *bug_id, raw_smp_processor_id(), cpu);
 245		atomic_dec(&n_csd_lock_stuck);
 246		return true;
 247	}
 248
 249	ts2 = ktime_get_mono_fast_ns();
 250	/* How long since we last checked for a stuck CSD lock.*/
 251	ts_delta = ts2 - *ts1;
 252	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
 253			       (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
 254		   csd_lock_timeout_ns == 0))
 255		return false;
 256
 257	if (ts0 > ts2) {
 258		/* Our own sched_clock went backward; don't blame another CPU. */
 259		ts_delta = ts0 - ts2;
 260		pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
 261		*ts1 = ts2;
 262		return false;
 263	}
 264
 265	firsttime = !*bug_id;
 266	if (firsttime)
 267		*bug_id = atomic_inc_return(&csd_bug_count);
 268	cpu = csd_lock_wait_getcpu(csd);
 269	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
 270		cpux = 0;
 271	else
 272		cpux = cpu;
 273	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
 274	/* How long since this CSD lock was stuck. */
 275	ts_delta = ts2 - ts0;
 276	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
 277		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
 278		 cpu, csd->func, csd->info);
 279	(*nmessages)++;
 280	if (firsttime)
 281		atomic_inc(&n_csd_lock_stuck);
 282	/*
 283	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
 284	 * to become unstuck. Use a signed comparison to avoid triggering
 285	 * on underflows when the TSC is out of sync between sockets.
 286	 */
 287	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
 288	if (cpu_cur_csd && csd != cpu_cur_csd) {
 289		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
 290			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
 291			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
 292	} else {
 293		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
 294			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
 295	}
 296	if (cpu >= 0) {
 297		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
 298			dump_cpu_task(cpu);
 299		if (!cpu_cur_csd) {
 300			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
 301			arch_send_call_function_single_ipi(cpu);
 302		}
 303	}
 304	if (firsttime)
 305		dump_stack();
 306	*ts1 = ts2;
 307
 308	return false;
 309}
 310
 311/*
 312 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 313 *
 314 * For non-synchronous ipi calls the csd can still be in use by the
 315 * previous function call. For multi-cpu calls it's even more interesting
 316 * as we'll have to ensure no other cpu is observing our csd.
 317 */
 318static void __csd_lock_wait(call_single_data_t *csd)
 319{
 320	unsigned long nmessages = 0;
 321	int bug_id = 0;
 322	u64 ts0, ts1;
 323
 324	ts1 = ts0 = ktime_get_mono_fast_ns();
 325	for (;;) {
 326		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
 327			break;
 328		cpu_relax();
 329	}
 330	smp_acquire__after_ctrl_dep();
 331}
 332
 333static __always_inline void csd_lock_wait(call_single_data_t *csd)
 334{
 335	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 336		__csd_lock_wait(csd);
 337		return;
 338	}
 339
 340	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 341}
 342#else
 343static void csd_lock_record(call_single_data_t *csd)
 344{
 345}
 346
 347static __always_inline void csd_lock_wait(call_single_data_t *csd)
 348{
 349	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 350}
 351#endif
 352
 353static __always_inline void csd_lock(call_single_data_t *csd)
 354{
 355	csd_lock_wait(csd);
 356	csd->node.u_flags |= CSD_FLAG_LOCK;
 357
 358	/*
 359	 * prevent CPU from reordering the above assignment
 360	 * to ->flags with any subsequent assignments to other
 361	 * fields of the specified call_single_data_t structure:
 362	 */
 363	smp_wmb();
 364}
 365
 366static __always_inline void csd_unlock(call_single_data_t *csd)
 367{
 368	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 369
 370	/*
 371	 * ensure we're all done before releasing data:
 372	 */
 373	smp_store_release(&csd->node.u_flags, 0);
 374}
 375
 376static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 377
 378void __smp_call_single_queue(int cpu, struct llist_node *node)
 379{
 380	/*
 381	 * We have to check the type of the CSD before queueing it, because
 382	 * once queued it can have its flags cleared by
 383	 *   flush_smp_call_function_queue()
 384	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
 385	 * executes migration_cpu_stop() on the remote CPU).
 386	 */
 387	if (trace_csd_queue_cpu_enabled()) {
 388		call_single_data_t *csd;
 389		smp_call_func_t func;
 390
 391		csd = container_of(node, call_single_data_t, node.llist);
 392		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
 393			sched_ttwu_pending : csd->func;
 394
 395		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
 396	}
 397
 398	/*
 399	 * The list addition should be visible to the target CPU when it pops
 400	 * the head of the list to pull the entry off it in the IPI handler
 401	 * because of normal cache coherency rules implied by the underlying
 402	 * llist ops.
 403	 *
 404	 * If IPIs can go out of order to the cache coherency protocol
 405	 * in an architecture, sufficient synchronisation should be added
 406	 * to arch code to make it appear to obey cache coherency WRT
 407	 * locking and barrier primitives. Generic code isn't really
 408	 * equipped to do the right thing...
 409	 */
 410	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
 411		send_call_function_single_ipi(cpu);
 412}
 413
 414/*
 415 * Insert a previously allocated call_single_data_t element
 416 * for execution on the given CPU. data must already have
 417 * ->func, ->info, and ->flags set.
 418 */
 419static int generic_exec_single(int cpu, call_single_data_t *csd)
 420{
 421	if (cpu == smp_processor_id()) {
 422		smp_call_func_t func = csd->func;
 423		void *info = csd->info;
 424		unsigned long flags;
 425
 426		/*
 427		 * We can unlock early even for the synchronous on-stack case,
 428		 * since we're doing this from the same CPU..
 429		 */
 430		csd_lock_record(csd);
 431		csd_unlock(csd);
 432		local_irq_save(flags);
 433		csd_do_func(func, info, NULL);
 434		csd_lock_record(NULL);
 435		local_irq_restore(flags);
 436		return 0;
 437	}
 438
 439	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 440		csd_unlock(csd);
 441		return -ENXIO;
 442	}
 443
 444	__smp_call_single_queue(cpu, &csd->node.llist);
 445
 446	return 0;
 447}
 448
 449/**
 450 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 451 *
 452 * Invoked by arch to handle an IPI for call function single.
 453 * Must be called with interrupts disabled.
 454 */
 455void generic_smp_call_function_single_interrupt(void)
 456{
 457	__flush_smp_call_function_queue(true);
 458}
 459
 460/**
 461 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 462 *
 463 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 464 *		      offline CPU. Skip this check if set to 'false'.
 465 *
 466 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 467 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 468 * to ensure that all pending IPI callbacks are run before it goes completely
 469 * offline.
 470 *
 471 * Loop through the call_single_queue and run all the queued callbacks.
 472 * Must be called with interrupts disabled.
 473 */
 474static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 475{
 476	call_single_data_t *csd, *csd_next;
 477	struct llist_node *entry, *prev;
 478	struct llist_head *head;
 479	static bool warned;
 480	atomic_t *tbt;
 481
 482	lockdep_assert_irqs_disabled();
 483
 484	/* Allow waiters to send backtrace NMI from here onwards */
 485	tbt = this_cpu_ptr(&trigger_backtrace);
 486	atomic_set_release(tbt, 1);
 487
 488	head = this_cpu_ptr(&call_single_queue);
 489	entry = llist_del_all(head);
 490	entry = llist_reverse_order(entry);
 491
 492	/* There shouldn't be any pending callbacks on an offline CPU. */
 493	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
 494		     !warned && entry != NULL)) {
 495		warned = true;
 496		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 497
 498		/*
 499		 * We don't have to use the _safe() variant here
 500		 * because we are not invoking the IPI handlers yet.
 501		 */
 502		llist_for_each_entry(csd, entry, node.llist) {
 503			switch (CSD_TYPE(csd)) {
 504			case CSD_TYPE_ASYNC:
 505			case CSD_TYPE_SYNC:
 506			case CSD_TYPE_IRQ_WORK:
 507				pr_warn("IPI callback %pS sent to offline CPU\n",
 508					csd->func);
 509				break;
 510
 511			case CSD_TYPE_TTWU:
 512				pr_warn("IPI task-wakeup sent to offline CPU\n");
 513				break;
 514
 515			default:
 516				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
 517					CSD_TYPE(csd));
 518				break;
 519			}
 520		}
 521	}
 522
 523	/*
 524	 * First; run all SYNC callbacks, people are waiting for us.
 525	 */
 526	prev = NULL;
 527	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 528		/* Do we wait until *after* callback? */
 529		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
 530			smp_call_func_t func = csd->func;
 531			void *info = csd->info;
 532
 533			if (prev) {
 534				prev->next = &csd_next->node.llist;
 535			} else {
 536				entry = &csd_next->node.llist;
 537			}
 538
 539			csd_lock_record(csd);
 540			csd_do_func(func, info, csd);
 541			csd_unlock(csd);
 542			csd_lock_record(NULL);
 543		} else {
 544			prev = &csd->node.llist;
 545		}
 546	}
 547
 548	if (!entry)
 549		return;
 550
 551	/*
 552	 * Second; run all !SYNC callbacks.
 553	 */
 554	prev = NULL;
 555	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 556		int type = CSD_TYPE(csd);
 557
 558		if (type != CSD_TYPE_TTWU) {
 559			if (prev) {
 560				prev->next = &csd_next->node.llist;
 561			} else {
 562				entry = &csd_next->node.llist;
 563			}
 564
 565			if (type == CSD_TYPE_ASYNC) {
 566				smp_call_func_t func = csd->func;
 567				void *info = csd->info;
 568
 569				csd_lock_record(csd);
 570				csd_unlock(csd);
 571				csd_do_func(func, info, csd);
 572				csd_lock_record(NULL);
 573			} else if (type == CSD_TYPE_IRQ_WORK) {
 574				irq_work_single(csd);
 575			}
 576
 577		} else {
 578			prev = &csd->node.llist;
 579		}
 580	}
 581
 582	/*
 583	 * Third; only CSD_TYPE_TTWU is left, issue those.
 584	 */
 585	if (entry) {
 586		csd = llist_entry(entry, typeof(*csd), node.llist);
 587		csd_do_func(sched_ttwu_pending, entry, csd);
 588	}
 589}
 590
 591
 592/**
 593 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 594 *				   from task context (idle, migration thread)
 595 *
 596 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 597 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 598 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 599 * handle queued SMP function calls before scheduling.
 600 *
 601 * The migration thread has to ensure that an eventually pending wakeup has
 602 * been handled before it migrates a task.
 603 */
 604void flush_smp_call_function_queue(void)
 605{
 606	unsigned int was_pending;
 607	unsigned long flags;
 608
 609	if (llist_empty(this_cpu_ptr(&call_single_queue)))
 610		return;
 611
 612	local_irq_save(flags);
 613	/* Get the already pending soft interrupts for RT enabled kernels */
 614	was_pending = local_softirq_pending();
 615	__flush_smp_call_function_queue(true);
 616	if (local_softirq_pending())
 617		do_softirq_post_smp_call_flush(was_pending);
 618
 619	local_irq_restore(flags);
 620}
 621
 622/*
 623 * smp_call_function_single - Run a function on a specific CPU
 624 * @func: The function to run. This must be fast and non-blocking.
 625 * @info: An arbitrary pointer to pass to the function.
 626 * @wait: If true, wait until function has completed on other CPUs.
 627 *
 628 * Returns 0 on success, else a negative status code.
 629 */
 630int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 631			     int wait)
 632{
 633	call_single_data_t *csd;
 634	call_single_data_t csd_stack = {
 635		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
 636	};
 637	int this_cpu;
 638	int err;
 639
 640	/*
 641	 * prevent preemption and reschedule on another processor,
 642	 * as well as CPU removal
 643	 */
 644	this_cpu = get_cpu();
 645
 646	/*
 647	 * Can deadlock when called with interrupts disabled.
 648	 * We allow cpu's that are not yet online though, as no one else can
 649	 * We allow CPUs that are not yet online though, as no one else can
 650	 * can't happen.
 651	 */
 652	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 653		     && !oops_in_progress);
 654
 655	/*
 656	 * When @wait we can deadlock when we interrupt between llist_add() and
 657	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 658	 * csd_lock(), because the interrupt context uses the same csd
 659	 * storage.
 660	 */
 661	WARN_ON_ONCE(!in_task());
 662
 663	csd = &csd_stack;
 664	if (!wait) {
 665		csd = this_cpu_ptr(&csd_data);
 666		csd_lock(csd);
 667	}
 668
 669	csd->func = func;
 670	csd->info = info;
 671#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 672	csd->node.src = smp_processor_id();
 673	csd->node.dst = cpu;
 674#endif
 675
 676	err = generic_exec_single(cpu, csd);
 677
 678	if (wait)
 679		csd_lock_wait(csd);
 680
 681	put_cpu();
 682
 683	return err;
 684}
 685EXPORT_SYMBOL(smp_call_function_single);
 686
 687/**
 688 * smp_call_function_single_async() - Run an asynchronous function on a
 689 * 			         specific CPU.
 690 * @cpu: The CPU to run on.
 691 * @csd: Pre-allocated and setup data structure
 692 *
 693 * Like smp_call_function_single(), but the call is asynchronous and
 694 * can thus be done from contexts with disabled interrupts.
 695 *
 696 * The caller passes his own pre-allocated data structure
 697 * (ie: embedded in an object) and is responsible for synchronizing it
 698 * such that the IPIs performed on the @csd are strictly serialized.
 699 *
 700 * If the function is called with one csd which has not yet been
 701 * processed by previous call to smp_call_function_single_async(), the
 702 * function will return immediately with -EBUSY showing that the csd
 703 * object is still in progress.
 704 *
 705 * NOTE: Be careful, there is unfortunately no current debugging facility to
 706 * validate the correctness of this serialization.
 707 *
 708 * Return: %0 on success or negative errno value on error
 709 */
 710int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 711{
 712	int err = 0;
 713
 714	preempt_disable();
 715
 716	if (csd->node.u_flags & CSD_FLAG_LOCK) {
 717		err = -EBUSY;
 718		goto out;
 719	}
 720
 721	csd->node.u_flags = CSD_FLAG_LOCK;
 722	smp_wmb();
 723
 724	err = generic_exec_single(cpu, csd);
 725
 726out:
 727	preempt_enable();
 728
 729	return err;
 730}
 731EXPORT_SYMBOL_GPL(smp_call_function_single_async);
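/*
 * Usage sketch of the -EBUSY contract above (not part of this file): a
 * periodic caller can simply skip a round while the previous async call
 * is still in flight. Illustrative names; INIT_CSD() is the initializer
 * from <linux/smp.h> for the ->node based csd layout of this version.
 */
static void demo_tick_fn(void *info)
{
	/* fast, non-blocking work on the remote CPU */
}

static call_single_data_t demo_tick_csd;

static void demo_tick_setup(void)
{
	INIT_CSD(&demo_tick_csd, demo_tick_fn, NULL);
}

static void demo_tick(int cpu)
{
	if (smp_call_function_single_async(cpu, &demo_tick_csd) == -EBUSY)
		pr_debug("previous tick still pending, skipping\n");
}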
 732
 733/*
 734 * smp_call_function_any - Run a function on any of the given cpus
 735 * @mask: The mask of cpus it can run on.
 736 * @func: The function to run. This must be fast and non-blocking.
 737 * @info: An arbitrary pointer to pass to the function.
 738 * @wait: If true, wait until function has completed.
 739 *
 740 * Returns 0 on success, else a negative status code (if no cpus were online).
 741 *
 742 * Selection preference:
 743 *	1) current cpu if in @mask
 744 *	2) any cpu of current node if in @mask
 745 *	3) any other online cpu in @mask
 746 */
 747int smp_call_function_any(const struct cpumask *mask,
 748			  smp_call_func_t func, void *info, int wait)
 749{
 750	unsigned int cpu;
 751	const struct cpumask *nodemask;
 752	int ret;
 753
 754	/* Try for same CPU (cheapest) */
 755	cpu = get_cpu();
 756	if (cpumask_test_cpu(cpu, mask))
 757		goto call;
 758
 759	/* Try for same node. */
 760	nodemask = cpumask_of_node(cpu_to_node(cpu));
 761	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 762	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
 763		if (cpu_online(cpu))
 764			goto call;
 765	}
 766
 767	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
 768	cpu = cpumask_any_and(mask, cpu_online_mask);
 769call:
 770	ret = smp_call_function_single(cpu, func, info, wait);
 771	put_cpu();
 772	return ret;
 773}
 774EXPORT_SYMBOL_GPL(smp_call_function_any);
 775
 776/*
 777 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 778 *
 779 * %SCF_WAIT:		Wait until function execution is completed
 780 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 781 */
 782#define SCF_WAIT	(1U << 0)
 783#define SCF_RUN_LOCAL	(1U << 1)
 784
 785static void smp_call_function_many_cond(const struct cpumask *mask,
 786					smp_call_func_t func, void *info,
 787					unsigned int scf_flags,
 788					smp_cond_func_t cond_func)
 789{
 790	int cpu, last_cpu, this_cpu = smp_processor_id();
 791	struct call_function_data *cfd;
 792	bool wait = scf_flags & SCF_WAIT;
 793	int nr_cpus = 0;
 794	bool run_remote = false;
 795	bool run_local = false;
 796
 797	lockdep_assert_preemption_disabled();
 798
 799	/*
 800	 * Can deadlock when called with interrupts disabled.
 801	 * We allow cpu's that are not yet online though, as no one else can
 802	 * We allow CPUs that are not yet online though, as no one else can
 803	 * can't happen.
 804	 */
 805	if (cpu_online(this_cpu) && !oops_in_progress &&
 806	    !early_boot_irqs_disabled)
 807		lockdep_assert_irqs_enabled();
 808
 809	/*
 810	 * When @wait we can deadlock when we interrupt between llist_add() and
 811	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 812	 * csd_lock(), because the interrupt context uses the same csd
 813	 * storage.
 814	 */
 815	WARN_ON_ONCE(!in_task());
 816
 817	/* Check if we need local execution. */
 818	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
 819		run_local = true;
 820
 821	/* Check if we need remote execution, i.e., any CPU excluding this one. */
 822	cpu = cpumask_first_and(mask, cpu_online_mask);
 823	if (cpu == this_cpu)
 824		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 825	if (cpu < nr_cpu_ids)
 826		run_remote = true;
 827
 828	if (run_remote) {
 829		cfd = this_cpu_ptr(&cfd_data);
 830		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 831		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
 832
 833		cpumask_clear(cfd->cpumask_ipi);
 834		for_each_cpu(cpu, cfd->cpumask) {
 835			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 836
 837			if (cond_func && !cond_func(cpu, info)) {
 838				__cpumask_clear_cpu(cpu, cfd->cpumask);
 839				continue;
 840			}
 841
 842			csd_lock(csd);
 843			if (wait)
 844				csd->node.u_flags |= CSD_TYPE_SYNC;
 845			csd->func = func;
 846			csd->info = info;
 847#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 848			csd->node.src = smp_processor_id();
 849			csd->node.dst = cpu;
 850#endif
 851			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
 852
 853			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 854				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 855				nr_cpus++;
 856				last_cpu = cpu;
 857			}
 858		}
 859
 860		/*
 861		 * Choose the most efficient way to send an IPI. Note that the
 862		 * number of CPUs might be zero due to concurrent changes to the
 863		 * provided mask.
 864		 */
 865		if (nr_cpus == 1)
 866			send_call_function_single_ipi(last_cpu);
 867		else if (likely(nr_cpus > 1))
 868			send_call_function_ipi_mask(cfd->cpumask_ipi);
 869	}
 870
 871	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
 872		unsigned long flags;
 873
 874		local_irq_save(flags);
 875		csd_do_func(func, info, NULL);
 876		local_irq_restore(flags);
 877	}
 878
 879	if (run_remote && wait) {
 880		for_each_cpu(cpu, cfd->cpumask) {
 881			call_single_data_t *csd;
 882
 883			csd = per_cpu_ptr(cfd->csd, cpu);
 884			csd_lock_wait(csd);
 885		}
 886	}
 887}
 888
 889/**
 890 * smp_call_function_many(): Run a function on a set of CPUs.
 891 * @mask: The set of cpus to run on (only runs on online subset).
 892 * @func: The function to run. This must be fast and non-blocking.
 893 * @info: An arbitrary pointer to pass to the function.
 894 * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
 895 *        (atomically) until function has completed on other CPUs. If
 896 *        %SCF_RUN_LOCAL is set, the function will also be run locally
 897 *        if the local CPU is set in the @cpumask.
 898 *
 899 * If @wait is true, then returns once @func has returned.
 900 *
 901 * You must not call this function with disabled interrupts or from a
 902 * hardware interrupt handler or from a bottom half handler. Preemption
 903 * must be disabled when calling this function.
 904 */
 905void smp_call_function_many(const struct cpumask *mask,
 906			    smp_call_func_t func, void *info, bool wait)
 907{
 908	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
 909}
 910EXPORT_SYMBOL(smp_call_function_many);
 911
 912/**
 913 * smp_call_function(): Run a function on all other CPUs.
 914 * @func: The function to run. This must be fast and non-blocking.
 915 * @info: An arbitrary pointer to pass to the function.
 916 * @wait: If true, wait (atomically) until function has completed
 917 *        on other CPUs.
 918 *
 919 * Returns 0.
 920 *
 921 * If @wait is true, then returns once @func has returned; otherwise
 922 * it returns just before the target cpu calls @func.
 923 *
 924 * You must not call this function with disabled interrupts or from a
 925 * hardware interrupt handler or from a bottom half handler.
 926 */
 927void smp_call_function(smp_call_func_t func, void *info, int wait)
 928{
 929	preempt_disable();
 930	smp_call_function_many(cpu_online_mask, func, info, wait);
 931	preempt_enable();
 932}
 933EXPORT_SYMBOL(smp_call_function);
 934
 935/* Setup configured maximum number of CPUs to activate */
 936unsigned int setup_max_cpus = NR_CPUS;
 937EXPORT_SYMBOL(setup_max_cpus);
 938
 939
 940/*
 941 * Setup routine for controlling SMP activation
 942 *
 943 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 944 * activation entirely (the MPS table probe still happens, though).
 945 *
 946 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 947 * greater than 0, limits the maximum number of CPUs activated in
 948 * SMP mode to <NUM>.
 949 */
 950
 951void __weak __init arch_disable_smp_support(void) { }
 952
 953static int __init nosmp(char *str)
 954{
 955	setup_max_cpus = 0;
 956	arch_disable_smp_support();
 957
 958	return 0;
 959}
 960
 961early_param("nosmp", nosmp);
 962
 963/* this is hard limit */
 964static int __init nrcpus(char *str)
 965{
 966	int nr_cpus;
 967
 968	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
 969		set_nr_cpu_ids(nr_cpus);
 970
 971	return 0;
 972}
 973
 974early_param("nr_cpus", nrcpus);
 975
 976static int __init maxcpus(char *str)
 977{
 978	get_option(&str, &setup_max_cpus);
 979	if (setup_max_cpus == 0)
 980		arch_disable_smp_support();
 981
 982	return 0;
 983}
 984
 985early_param("maxcpus", maxcpus);
 986
 987#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
 988/* Setup number of possible processor ids */
 989unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
 990EXPORT_SYMBOL(nr_cpu_ids);
 991#endif
 992
 993/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
 994void __init setup_nr_cpu_ids(void)
 995{
 996	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
 997}
 998
 999/* Called by boot processor to activate the rest. */
1000void __init smp_init(void)
1001{
1002	int num_nodes, num_cpus;
1003
1004	idle_threads_init();
1005	cpuhp_threads_init();
1006
1007	pr_info("Bringing up secondary CPUs ...\n");
1008
1009	bringup_nonboot_cpus(setup_max_cpus);
1010
1011	num_nodes = num_online_nodes();
1012	num_cpus  = num_online_cpus();
1013	pr_info("Brought up %d node%s, %d CPU%s\n",
1014		num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));
1015
1016	/* Any cleanup work */
1017	smp_cpus_done(setup_max_cpus);
1018}
1019
1020/*
1021 * on_each_cpu_cond(): Call a function on each processor for which
1022 * the supplied function cond_func returns true, optionally waiting
1023 * for all the required CPUs to finish. This may include the local
1024 * processor.
1025 * @cond_func:	A callback function that is passed a cpu id and
1026 *		the info parameter. The function is called
1027 *		with preemption disabled. The function should
1028 *		return a boolean value indicating whether to IPI
1029 *		the specified CPU.
1030 * @func:	The function to run on all applicable CPUs.
1031 *		This must be fast and non-blocking.
1032 * @info:	An arbitrary pointer to pass to both functions.
1033 * @wait:	If true, wait (atomically) until function has
1034 *		completed on other CPUs.
1035 *
1036 * Preemption is disabled to protect against CPUs going offline but not online.
1037 * CPUs going online during the call will not be seen or sent an IPI.
1038 *
1039 * You must not call this function with disabled interrupts or
1040 * from a hardware interrupt handler or from a bottom half handler.
1041 */
1042void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
1043			   void *info, bool wait, const struct cpumask *mask)
1044{
1045	unsigned int scf_flags = SCF_RUN_LOCAL;
1046
1047	if (wait)
1048		scf_flags |= SCF_WAIT;
1049
1050	preempt_disable();
1051	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
1052	preempt_enable();
1053}
1054EXPORT_SYMBOL(on_each_cpu_cond_mask);
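/*
 * Usage sketch for on_each_cpu_cond_mask() (not part of this file),
 * reusing the hypothetical demo_need_ipi()/demo_reset() from the earlier
 * sketches: same as on_each_cpu_cond(), but restricted to @mask. With
 * SCF_RUN_LOCAL the local CPU is handled in place, so no cpumask
 * allocation is needed, unlike the older GFP-based variant above.
 */
static void demo_cond_mask(const struct cpumask *mask)
{
	on_each_cpu_cond_mask(demo_need_ipi, demo_reset, NULL, true, mask);
}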
1055
1056static void do_nothing(void *unused)
1057{
1058}
1059
1060/**
1061 * kick_all_cpus_sync - Force all cpus out of idle
1062 *
1063 * Used to synchronize the update of pm_idle function pointer. It's
1064 * called after the pointer is updated and returns after the dummy
1065 * callback function has been executed on all cpus. The execution of
1066 * the function can only happen on the remote cpus after they have
1067 * left the idle function which had been called via pm_idle function
1068 * pointer. So it's guaranteed that nothing uses the previous pointer
1069 * anymore.
1070 */
1071void kick_all_cpus_sync(void)
1072{
1073	/* Make sure the change is visible before we kick the cpus */
1074	smp_mb();
1075	smp_call_function(do_nothing, NULL, 1);
1076}
1077EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
1078
1079/**
1080 * wake_up_all_idle_cpus - break all cpus out of idle
1081 * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
1082 * including CPUs that are idle-polling; CPUs that are not idle are left
1083 * alone.
1084 */
1085void wake_up_all_idle_cpus(void)
1086{
1087	int cpu;
1088
1089	for_each_possible_cpu(cpu) {
1090		preempt_disable();
1091		if (cpu != smp_processor_id() && cpu_online(cpu))
1092			wake_up_if_idle(cpu);
1093		preempt_enable();
1094	}
1095}
1096EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
1097
1098/**
1099 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1100 * @work: &work_struct
1101 * @done: &completion to signal
1102 * @func: function to call
1103 * @data: function's data argument
1104 * @ret: return value from @func
1105 * @cpu: target CPU (%-1 for any CPU)
1106 *
1107 * Used to call a function on a specific cpu and wait for it to return.
1108 * Optionally make sure the call is done on a specified physical cpu via vcpu
1109 * pinning in order to support virtualized environments.
1110 */
1111struct smp_call_on_cpu_struct {
1112	struct work_struct	work;
1113	struct completion	done;
1114	int			(*func)(void *);
1115	void			*data;
1116	int			ret;
1117	int			cpu;
1118};
1119
1120static void smp_call_on_cpu_callback(struct work_struct *work)
1121{
1122	struct smp_call_on_cpu_struct *sscs;
1123
1124	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
1125	if (sscs->cpu >= 0)
1126		hypervisor_pin_vcpu(sscs->cpu);
1127	sscs->ret = sscs->func(sscs->data);
1128	if (sscs->cpu >= 0)
1129		hypervisor_pin_vcpu(-1);
1130
1131	complete(&sscs->done);
1132}
1133
1134int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
1135{
1136	struct smp_call_on_cpu_struct sscs = {
1137		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
1138		.func = func,
1139		.data = par,
1140		.cpu  = phys ? cpu : -1,
1141	};
1142
1143	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
1144
1145	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1146		return -ENXIO;
1147
1148	queue_work_on(cpu, system_wq, &sscs.work);
1149	wait_for_completion(&sscs.done);
1150	destroy_work_on_stack(&sscs.work);
1151
1152	return sscs.ret;
1153}
1154EXPORT_SYMBOL_GPL(smp_call_on_cpu);