v4.10.11 (kernel/smp.c)
  1/*
  2 * Generic helpers for smp ipi calls
  3 *
  4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/irq_work.h>
 10#include <linux/rcupdate.h>
 11#include <linux/rculist.h>
 12#include <linux/kernel.h>
 13#include <linux/export.h>
 14#include <linux/percpu.h>
 15#include <linux/init.h>
 16#include <linux/gfp.h>
 17#include <linux/smp.h>
 18#include <linux/cpu.h>
 19#include <linux/sched.h>
 20#include <linux/hypervisor.h>
 21
 22#include "smpboot.h"
 23
 24enum {
 25	CSD_FLAG_LOCK		= 0x01,
 26	CSD_FLAG_SYNCHRONOUS	= 0x02,
 27};
 28
 29struct call_function_data {
 30	struct call_single_data	__percpu *csd;
 31	cpumask_var_t		cpumask;
 32};
 33
 34static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 35
 36static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 37
 38static void flush_smp_call_function_queue(bool warn_cpu_offline);
 39
 40int smpcfd_prepare_cpu(unsigned int cpu)
 41{
 42	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 43
 44	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 45				     cpu_to_node(cpu)))
 46		return -ENOMEM;
 47	cfd->csd = alloc_percpu(struct call_single_data);
 48	if (!cfd->csd) {
 49		free_cpumask_var(cfd->cpumask);
 50		return -ENOMEM;
 51	}
 52
 53	return 0;
 54}
 55
 56int smpcfd_dead_cpu(unsigned int cpu)
 57{
 58	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 59
 60	free_cpumask_var(cfd->cpumask);
 61	free_percpu(cfd->csd);
 62	return 0;
 63}
 64
 65int smpcfd_dying_cpu(unsigned int cpu)
 66{
 67	/*
 68	 * The IPIs for the smp-call-function callbacks queued by other
 69	 * CPUs might arrive late, either due to hardware latencies or
 70	 * because this CPU disabled interrupts (inside stop-machine)
 71	 * before the IPIs were sent. So flush out any pending callbacks
 72	 * explicitly (without waiting for the IPIs to arrive), to
 73	 * ensure that the outgoing CPU doesn't go offline with work
 74	 * still pending.
 75	 */
 76	flush_smp_call_function_queue(false);
 77	return 0;
 78}
 79
 80void __init call_function_init(void)
 81{
 82	int i;
 83
 84	for_each_possible_cpu(i)
 85		init_llist_head(&per_cpu(call_single_queue, i));
 86
 87	smpcfd_prepare_cpu(smp_processor_id());
 88}
 89
 90/*
 91 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 92 *
 93 * For non-synchronous ipi calls the csd can still be in use by the
 94 * previous function call. For multi-cpu calls it's even more interesting
 95 * as we'll have to ensure no other cpu is observing our csd.
 96 */
 97static __always_inline void csd_lock_wait(struct call_single_data *csd)
 98{
 99	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
100}
101
102static __always_inline void csd_lock(struct call_single_data *csd)
103{
104	csd_lock_wait(csd);
105	csd->flags |= CSD_FLAG_LOCK;
106
107	/*
108	 * prevent CPU from reordering the above assignment
109	 * to ->flags with any subsequent assignments to other
110	 * fields of the specified call_single_data structure:
111	 */
112	smp_wmb();
113}
114
115static __always_inline void csd_unlock(struct call_single_data *csd)
116{
117	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
118
119	/*
120	 * ensure we're all done before releasing data:
121	 */
122	smp_store_release(&csd->flags, 0);
123}
124
125static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
126
127/*
128 * Insert a previously allocated call_single_data element
129 * for execution on the given CPU. data must already have
130 * ->func, ->info, and ->flags set.
131 */
132static int generic_exec_single(int cpu, struct call_single_data *csd,
133			       smp_call_func_t func, void *info)
134{
135	if (cpu == smp_processor_id()) {
136		unsigned long flags;
137
138		/*
139		 * We can unlock early even for the synchronous on-stack case,
140		 * since we're doing this from the same CPU..
141		 */
142		csd_unlock(csd);
143		local_irq_save(flags);
144		func(info);
145		local_irq_restore(flags);
146		return 0;
147	}
148
149
150	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
151		csd_unlock(csd);
152		return -ENXIO;
153	}
154
155	csd->func = func;
156	csd->info = info;
157
158	/*
159	 * The list addition should be visible before sending the IPI
160	 * handler locks the list to pull the entry off it because of
161	 * normal cache coherency rules implied by spinlocks.
162	 *
163	 * If IPIs can go out of order to the cache coherency protocol
164	 * in an architecture, sufficient synchronisation should be added
165	 * to arch code to make it appear to obey cache coherency WRT
166	 * locking and barrier primitives. Generic code isn't really
167	 * equipped to do the right thing...
168	 */
169	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
170		arch_send_call_function_single_ipi(cpu);
171
172	return 0;
173}
174
175/**
176 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
177 *
178 * Invoked by arch to handle an IPI for call function single.
179 * Must be called with interrupts disabled.
180 */
181void generic_smp_call_function_single_interrupt(void)
182{
183	flush_smp_call_function_queue(true);
184}
185
186/**
187 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
188 *
189 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
190 *		      offline CPU. Skip this check if set to 'false'.
191 *
192 * Flush any pending smp-call-function callbacks queued on this CPU. This is
193 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
194 * to ensure that all pending IPI callbacks are run before it goes completely
195 * offline.
196 *
197 * Loop through the call_single_queue and run all the queued callbacks.
198 * Must be called with interrupts disabled.
199 */
200static void flush_smp_call_function_queue(bool warn_cpu_offline)
201{
202	struct llist_head *head;
203	struct llist_node *entry;
204	struct call_single_data *csd, *csd_next;
205	static bool warned;
206
207	WARN_ON(!irqs_disabled());
208
209	head = this_cpu_ptr(&call_single_queue);
210	entry = llist_del_all(head);
211	entry = llist_reverse_order(entry);
212
213	/* There shouldn't be any pending callbacks on an offline CPU. */
214	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
215		     !warned && !llist_empty(head))) {
216		warned = true;
217		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
218
219		/*
220		 * We don't have to use the _safe() variant here
221		 * because we are not invoking the IPI handlers yet.
222		 */
223		llist_for_each_entry(csd, entry, llist)
224			pr_warn("IPI callback %pS sent to offline CPU\n",
225				csd->func);
226	}
227
228	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
229		smp_call_func_t func = csd->func;
230		void *info = csd->info;
231
232		/* Do we wait until *after* callback? */
233		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
234			func(info);
235			csd_unlock(csd);
236		} else {
237			csd_unlock(csd);
238			func(info);
239		}
240	}
241
242	/*
243	 * Handle irq works queued remotely by irq_work_queue_on().
244	 * Smp functions above are typically synchronous so they
245	 * better run first since some other CPUs may be busy waiting
246	 * for them.
247	 */
248	irq_work_run();
249}
250
251/*
252 * smp_call_function_single - Run a function on a specific CPU
253 * @func: The function to run. This must be fast and non-blocking.
254 * @info: An arbitrary pointer to pass to the function.
255 * @wait: If true, wait until function has completed on other CPUs.
256 *
257 * Returns 0 on success, else a negative status code.
258 */
259int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
260			     int wait)
261{
262	struct call_single_data *csd;
263	struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
264	int this_cpu;
265	int err;
266
267	/*
268	 * prevent preemption and reschedule on another processor,
269	 * as well as CPU removal
270	 */
271	this_cpu = get_cpu();
272
273	/*
274	 * Can deadlock when called with interrupts disabled.
275	 * We allow cpu's that are not yet online though, as no one else can
276	 * send smp call function interrupt to this cpu and as such deadlocks
277	 * can't happen.
278	 */
279	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
280		     && !oops_in_progress);
281
282	csd = &csd_stack;
283	if (!wait) {
284		csd = this_cpu_ptr(&csd_data);
285		csd_lock(csd);
286	}
287
288	err = generic_exec_single(cpu, csd, func, info);
289
290	if (wait)
291		csd_lock_wait(csd);
292
293	put_cpu();
294
295	return err;
296}
297EXPORT_SYMBOL(smp_call_function_single);
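A minimal usage sketch (editor's illustration, not part of the kernel source; count_hit(), example_call_single() and the hard-coded target CPU are made up). The callback runs in hardirq context on the target CPU, so it must be fast and must not sleep:

static void count_hit(void *info)
{
	atomic_inc(info);		/* non-blocking work only */
}

static void example_call_single(void)
{
	static atomic_t hits = ATOMIC_INIT(0);
	int err;

	/* Run count_hit() on CPU 1 and wait for it to complete. */
	err = smp_call_function_single(1, count_hit, &hits, 1);
	if (err)
		pr_warn("CPU 1 is not online (err=%d)\n", err);
}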
298
299/**
300 * smp_call_function_single_async(): Run an asynchronous function on a
301 * 			         specific CPU.
302 * @cpu: The CPU to run on.
303 * @csd: Pre-allocated and setup data structure
304 *
305 * Like smp_call_function_single(), but the call is asynchronous and
306 * can thus be done from contexts with disabled interrupts.
307 *
308 * The caller passes his own pre-allocated data structure
309 * (ie: embedded in an object) and is responsible for synchronizing it
310 * such that the IPIs performed on the @csd are strictly serialized.
311 *
312 * NOTE: Be careful, there is unfortunately no current debugging facility to
313 * validate the correctness of this serialization.
314 */
315int smp_call_function_single_async(int cpu, struct call_single_data *csd)
316{
317	int err = 0;
318
319	preempt_disable();
320
321	/* We could deadlock if we have to wait here with interrupts disabled! */
322	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
323		csd_lock_wait(csd);
324
325	csd->flags = CSD_FLAG_LOCK;
326	smp_wmb();
327
328	err = generic_exec_single(cpu, csd, csd->func, csd->info);
329	preempt_enable();
330
331	return err;
332}
333EXPORT_SYMBOL_GPL(smp_call_function_single_async);
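A sketch of the asynchronous variant (editor's illustration; struct my_dev, my_dev_kick() and my_dev_poke() are hypothetical). The object embeds its own csd so the request can be fired from a context with interrupts disabled; the caller must not reuse the csd until the previous call has finished, and the structure is assumed to be zero-initialised (e.g. via kzalloc()):

struct my_dev {
	struct call_single_data csd;	/* flags must start out as 0 */
	int			pending;
};

static void my_dev_kick(void *info)
{
	struct my_dev *dev = info;

	WRITE_ONCE(dev->pending, 0);	/* runs on the target CPU */
}

static void my_dev_poke(struct my_dev *dev, int cpu)
{
	dev->csd.func = my_dev_kick;
	dev->csd.info = dev;
	/* Returns immediately; my_dev_kick() runs later on @cpu. */
	smp_call_function_single_async(cpu, &dev->csd);
}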
334
335/*
336 * smp_call_function_any - Run a function on any of the given cpus
337 * @mask: The mask of cpus it can run on.
338 * @func: The function to run. This must be fast and non-blocking.
339 * @info: An arbitrary pointer to pass to the function.
340 * @wait: If true, wait until function has completed.
341 *
342 * Returns 0 on success, else a negative status code (if no cpus were online).
343 *
344 * Selection preference:
345 *	1) current cpu if in @mask
346 *	2) any cpu of current node if in @mask
347 *	3) any other online cpu in @mask
348 */
349int smp_call_function_any(const struct cpumask *mask,
350			  smp_call_func_t func, void *info, int wait)
351{
352	unsigned int cpu;
353	const struct cpumask *nodemask;
354	int ret;
355
356	/* Try for same CPU (cheapest) */
357	cpu = get_cpu();
358	if (cpumask_test_cpu(cpu, mask))
359		goto call;
360
361	/* Try for same node. */
362	nodemask = cpumask_of_node(cpu_to_node(cpu));
363	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
364	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
365		if (cpu_online(cpu))
366			goto call;
367	}
368
369	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
370	cpu = cpumask_any_and(mask, cpu_online_mask);
371call:
372	ret = smp_call_function_single(cpu, func, info, wait);
373	put_cpu();
374	return ret;
375}
376EXPORT_SYMBOL_GPL(smp_call_function_any);
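A possible use of the selection order described above (editor's illustration; read_local_counter(), example_any() and the affinity mask are hypothetical): run a callback on whichever CPU of a device's affinity mask is cheapest to reach, preferring the current CPU and then the local node:

static void read_local_counter(void *info)
{
	*(u64 *)info = 0;	/* placeholder for a per-node read */
}

static int example_any(const struct cpumask *affinity)
{
	u64 val;

	/* wait=1: val is valid once this returns 0 */
	return smp_call_function_any(affinity, read_local_counter, &val, 1);
}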
377
378/**
379 * smp_call_function_many(): Run a function on a set of other CPUs.
380 * @mask: The set of cpus to run on (only runs on online subset).
381 * @func: The function to run. This must be fast and non-blocking.
382 * @info: An arbitrary pointer to pass to the function.
383 * @wait: If true, wait (atomically) until function has completed
384 *        on other CPUs.
385 *
386 * If @wait is true, then returns once @func has returned.
387 *
388 * You must not call this function with disabled interrupts or from a
389 * hardware interrupt handler or from a bottom half handler. Preemption
390 * must be disabled when calling this function.
391 */
392void smp_call_function_many(const struct cpumask *mask,
393			    smp_call_func_t func, void *info, bool wait)
394{
395	struct call_function_data *cfd;
396	int cpu, next_cpu, this_cpu = smp_processor_id();
397
398	/*
399	 * Can deadlock when called with interrupts disabled.
400	 * We allow cpu's that are not yet online though, as no one else can
401	 * send smp call function interrupt to this cpu and as such deadlocks
402	 * can't happen.
403	 */
404	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
405		     && !oops_in_progress && !early_boot_irqs_disabled);
406
407	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
408	cpu = cpumask_first_and(mask, cpu_online_mask);
409	if (cpu == this_cpu)
410		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
411
412	/* No online cpus?  We're done. */
413	if (cpu >= nr_cpu_ids)
414		return;
415
416	/* Do we have another CPU which isn't us? */
417	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
418	if (next_cpu == this_cpu)
419		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
420
421	/* Fastpath: do that cpu by itself. */
422	if (next_cpu >= nr_cpu_ids) {
423		smp_call_function_single(cpu, func, info, wait);
424		return;
425	}
426
427	cfd = this_cpu_ptr(&cfd_data);
428
429	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
430	cpumask_clear_cpu(this_cpu, cfd->cpumask);
431
432	/* Some callers race with other cpus changing the passed mask */
433	if (unlikely(!cpumask_weight(cfd->cpumask)))
434		return;
435
436	for_each_cpu(cpu, cfd->cpumask) {
437		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
438
439		csd_lock(csd);
440		if (wait)
441			csd->flags |= CSD_FLAG_SYNCHRONOUS;
442		csd->func = func;
443		csd->info = info;
444		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
445	}
446
447	/* Send a message to all CPUs in the map */
448	arch_send_call_function_ipi_mask(cfd->cpumask);
449
450	if (wait) {
451		for_each_cpu(cpu, cfd->cpumask) {
452			struct call_single_data *csd;
453
454			csd = per_cpu_ptr(cfd->csd, cpu);
455			csd_lock_wait(csd);
456		}
457	}
458}
459EXPORT_SYMBOL(smp_call_function_many);
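A usage sketch (editor's illustration; drain_pcpu_cache() and example_many() are hypothetical). Interrupts must be enabled and preemption disabled around the call, as the comment above requires:

static void drain_pcpu_cache(void *info)
{
	/* fast, non-blocking per-CPU work */
}

static void example_many(const struct cpumask *mask)
{
	preempt_disable();
	/* wait=true: do not return until every targeted CPU has run it */
	smp_call_function_many(mask, drain_pcpu_cache, NULL, true);
	preempt_enable();
}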
460
461/**
462 * smp_call_function(): Run a function on all other CPUs.
463 * @func: The function to run. This must be fast and non-blocking.
464 * @info: An arbitrary pointer to pass to the function.
465 * @wait: If true, wait (atomically) until function has completed
466 *        on other CPUs.
467 *
468 * Returns 0.
469 *
470 * If @wait is true, then returns once @func has returned; otherwise
471 * it returns just before the target cpu calls @func.
472 *
473 * You must not call this function with disabled interrupts or from a
474 * hardware interrupt handler or from a bottom half handler.
475 */
476int smp_call_function(smp_call_func_t func, void *info, int wait)
477{
478	preempt_disable();
479	smp_call_function_many(cpu_online_mask, func, info, wait);
480	preempt_enable();
481
482	return 0;
483}
484EXPORT_SYMBOL(smp_call_function);
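A usage sketch (editor's illustration; sync_point() and example_all_others() are hypothetical). smp_call_function() targets all *other* online CPUs; if the current CPU must run the callback too, use on_each_cpu() below:

static void sync_point(void *info)
{
	/* cheap, non-blocking synchronisation callback */
}

static void example_all_others(void)
{
	smp_call_function(sync_point, NULL, 1);	/* wait for all other CPUs */
}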
485
486/* Setup configured maximum number of CPUs to activate */
487unsigned int setup_max_cpus = NR_CPUS;
488EXPORT_SYMBOL(setup_max_cpus);
489
490
491/*
492 * Setup routine for controlling SMP activation
493 *
494 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
495 * activation entirely (the MPS table probe still happens, though).
496 *
497 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
498 * greater than 0, limits the maximum number of CPUs activated in
499 * SMP mode to <NUM>.
500 */
501
502void __weak arch_disable_smp_support(void) { }
503
504static int __init nosmp(char *str)
505{
506	setup_max_cpus = 0;
507	arch_disable_smp_support();
508
509	return 0;
510}
511
512early_param("nosmp", nosmp);
513
 514/* this is a hard limit */
515static int __init nrcpus(char *str)
516{
517	int nr_cpus;
518
519	get_option(&str, &nr_cpus);
520	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
521		nr_cpu_ids = nr_cpus;
522
523	return 0;
524}
525
526early_param("nr_cpus", nrcpus);
527
528static int __init maxcpus(char *str)
529{
530	get_option(&str, &setup_max_cpus);
531	if (setup_max_cpus == 0)
532		arch_disable_smp_support();
533
534	return 0;
535}
536
537early_param("maxcpus", maxcpus);
538
539/* Setup number of possible processor ids */
540int nr_cpu_ids __read_mostly = NR_CPUS;
541EXPORT_SYMBOL(nr_cpu_ids);
542
543/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
544void __init setup_nr_cpu_ids(void)
545{
546	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
547}
548
549/* Called by boot processor to activate the rest. */
550void __init smp_init(void)
551{
552	int num_nodes, num_cpus;
553	unsigned int cpu;
554
555	idle_threads_init();
556	cpuhp_threads_init();
557
558	pr_info("Bringing up secondary CPUs ...\n");
559
560	/* FIXME: This should be done in userspace --RR */
561	for_each_present_cpu(cpu) {
562		if (num_online_cpus() >= setup_max_cpus)
563			break;
564		if (!cpu_online(cpu))
565			cpu_up(cpu);
566	}
567
568	num_nodes = num_online_nodes();
569	num_cpus  = num_online_cpus();
570	pr_info("Brought up %d node%s, %d CPU%s\n",
571		num_nodes, (num_nodes > 1 ? "s" : ""),
572		num_cpus,  (num_cpus  > 1 ? "s" : ""));
573
574	/* Any cleanup work */
575	smp_cpus_done(setup_max_cpus);
576}
577
578/*
579 * Call a function on all processors.  May be used during early boot while
580 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
581 * of local_irq_disable/enable().
582 */
583int on_each_cpu(void (*func) (void *info), void *info, int wait)
584{
585	unsigned long flags;
586	int ret = 0;
587
588	preempt_disable();
589	ret = smp_call_function(func, info, wait);
590	local_irq_save(flags);
591	func(info);
592	local_irq_restore(flags);
593	preempt_enable();
594	return ret;
595}
596EXPORT_SYMBOL(on_each_cpu);
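A usage sketch (editor's illustration; bump_generation() and the generation counter are hypothetical). on_each_cpu() runs the callback on every online CPU, including the calling one:

static void bump_generation(void *info)
{
	atomic_inc(info);	/* called once per online CPU */
}

static void example_on_each_cpu(atomic_t *gen)
{
	/* wait=1: every CPU has observed the new generation on return */
	on_each_cpu(bump_generation, gen, 1);
}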
597
598/**
599 * on_each_cpu_mask(): Run a function on processors specified by
600 * cpumask, which may include the local processor.
601 * @mask: The set of cpus to run on (only runs on online subset).
602 * @func: The function to run. This must be fast and non-blocking.
603 * @info: An arbitrary pointer to pass to the function.
604 * @wait: If true, wait (atomically) until function has completed
605 *        on other CPUs.
606 *
607 * If @wait is true, then returns once @func has returned.
608 *
609 * You must not call this function with disabled interrupts or from a
610 * hardware interrupt handler or from a bottom half handler.  The
611 * exception is that it may be used during early boot while
612 * early_boot_irqs_disabled is set.
613 */
614void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
615			void *info, bool wait)
616{
617	int cpu = get_cpu();
618
619	smp_call_function_many(mask, func, info, wait);
620	if (cpumask_test_cpu(cpu, mask)) {
621		unsigned long flags;
622		local_irq_save(flags);
623		func(info);
624		local_irq_restore(flags);
625	}
626	put_cpu();
627}
628EXPORT_SYMBOL(on_each_cpu_mask);
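A usage sketch (editor's illustration; invalidate_local_state() and the dirty-CPU mask are hypothetical). Only the CPUs in the mask, possibly including the local one, run the callback:

static void invalidate_local_state(void *info)
{
	/* per-CPU invalidation, must not sleep */
}

static void example_mask(const struct cpumask *dirty_cpus)
{
	on_each_cpu_mask(dirty_cpus, invalidate_local_state, NULL, true);
}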
629
630/*
631 * on_each_cpu_cond(): Call a function on each processor for which
632 * the supplied function cond_func returns true, optionally waiting
633 * for all the required CPUs to finish. This may include the local
634 * processor.
635 * @cond_func:	A callback function that is passed a cpu id and
 636 *		the info parameter. The function is called
637 *		with preemption disabled. The function should
 638 *		return a boolean value indicating whether to IPI
639 *		the specified CPU.
640 * @func:	The function to run on all applicable CPUs.
641 *		This must be fast and non-blocking.
642 * @info:	An arbitrary pointer to pass to both functions.
643 * @wait:	If true, wait (atomically) until function has
644 *		completed on other CPUs.
645 * @gfp_flags:	GFP flags to use when allocating the cpumask
646 *		used internally by the function.
647 *
 648 * The function might sleep if the GFP flags indicate a non
649 * atomic allocation is allowed.
650 *
651 * Preemption is disabled to protect against CPUs going offline but not online.
652 * CPUs going online during the call will not be seen or sent an IPI.
653 *
654 * You must not call this function with disabled interrupts or
655 * from a hardware interrupt handler or from a bottom half handler.
656 */
657void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
658			smp_call_func_t func, void *info, bool wait,
659			gfp_t gfp_flags)
660{
661	cpumask_var_t cpus;
662	int cpu, ret;
663
664	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
665
666	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
667		preempt_disable();
668		for_each_online_cpu(cpu)
669			if (cond_func(cpu, info))
670				cpumask_set_cpu(cpu, cpus);
671		on_each_cpu_mask(cpus, func, info, wait);
672		preempt_enable();
673		free_cpumask_var(cpus);
674	} else {
675		/*
676		 * No free cpumask, bother. No matter, we'll
677		 * just have to IPI them one by one.
678		 */
679		preempt_disable();
680		for_each_online_cpu(cpu)
681			if (cond_func(cpu, info)) {
682				ret = smp_call_function_single(cpu, func,
683								info, wait);
684				WARN_ON_ONCE(ret);
685			}
686		preempt_enable();
687	}
688}
689EXPORT_SYMBOL(on_each_cpu_cond);
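A usage sketch (editor's illustration; cpu_needs_flush(), flush_one() and the dirty mask are hypothetical). Only CPUs for which cond_func returns true receive an IPI:

static bool cpu_needs_flush(int cpu, void *info)
{
	return cpumask_test_cpu(cpu, info);	/* info: mask of dirty CPUs */
}

static void flush_one(void *info)
{
	/* per-CPU flush work, must not sleep */
}

static void example_cond(struct cpumask *dirty)
{
	on_each_cpu_cond(cpu_needs_flush, flush_one, dirty, true, GFP_KERNEL);
}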
690
691static void do_nothing(void *unused)
692{
693}
694
695/**
696 * kick_all_cpus_sync - Force all cpus out of idle
697 *
698 * Used to synchronize the update of pm_idle function pointer. It's
699 * called after the pointer is updated and returns after the dummy
700 * callback function has been executed on all cpus. The execution of
701 * the function can only happen on the remote cpus after they have
702 * left the idle function which had been called via pm_idle function
703 * pointer. So it's guaranteed that nothing uses the previous pointer
704 * anymore.
705 */
706void kick_all_cpus_sync(void)
707{
708	/* Make sure the change is visible before we kick the cpus */
709	smp_mb();
710	smp_call_function(do_nothing, NULL, 1);
711}
712EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
713
714/**
715 * wake_up_all_idle_cpus - break all cpus out of idle
 716 * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
 717 * including CPUs that are busy-polling in idle; CPUs that are not idle
 718 * are left alone.
719 */
720void wake_up_all_idle_cpus(void)
721{
722	int cpu;
723
724	preempt_disable();
725	for_each_online_cpu(cpu) {
726		if (cpu == smp_processor_id())
727			continue;
728
729		wake_up_if_idle(cpu);
730	}
731	preempt_enable();
732}
733EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
734
735/**
736 * smp_call_on_cpu - Call a function on a specific cpu
737 *
738 * Used to call a function on a specific cpu and wait for it to return.
739 * Optionally make sure the call is done on a specified physical cpu via vcpu
740 * pinning in order to support virtualized environments.
741 */
742struct smp_call_on_cpu_struct {
743	struct work_struct	work;
744	struct completion	done;
745	int			(*func)(void *);
746	void			*data;
747	int			ret;
748	int			cpu;
749};
750
751static void smp_call_on_cpu_callback(struct work_struct *work)
752{
753	struct smp_call_on_cpu_struct *sscs;
754
755	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
756	if (sscs->cpu >= 0)
757		hypervisor_pin_vcpu(sscs->cpu);
758	sscs->ret = sscs->func(sscs->data);
759	if (sscs->cpu >= 0)
760		hypervisor_pin_vcpu(-1);
761
762	complete(&sscs->done);
763}
764
765int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
766{
767	struct smp_call_on_cpu_struct sscs = {
768		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
769		.func = func,
770		.data = par,
771		.cpu  = phys ? cpu : -1,
772	};
773
774	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
775
776	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
777		return -ENXIO;
778
779	queue_work_on(cpu, system_wq, &sscs.work);
780	wait_for_completion(&sscs.done);
781
782	return sscs.ret;
783}
784EXPORT_SYMBOL_GPL(smp_call_on_cpu);
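A usage sketch (editor's illustration; slow_read() and example_on_cpu() are hypothetical). Unlike the IPI-based helpers above, smp_call_on_cpu() runs the function from a workqueue on the target CPU, so it may sleep; with phys=true it is also pinned to the physical CPU when running under a hypervisor:

static int slow_read(void *arg)
{
	/* may sleep; runs in process context on the chosen CPU */
	return 0;
}

static int example_on_cpu(unsigned int cpu)
{
	return smp_call_on_cpu(cpu, slow_read, NULL, true);
}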
v4.17 (kernel/smp.c)
  1/*
  2 * Generic helpers for smp ipi calls
  3 *
  4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/irq_work.h>
 10#include <linux/rcupdate.h>
 11#include <linux/rculist.h>
 12#include <linux/kernel.h>
 13#include <linux/export.h>
 14#include <linux/percpu.h>
 15#include <linux/init.h>
 16#include <linux/gfp.h>
 17#include <linux/smp.h>
 18#include <linux/cpu.h>
 19#include <linux/sched.h>
 20#include <linux/sched/idle.h>
 21#include <linux/hypervisor.h>
 22
 23#include "smpboot.h"
 24
 25enum {
 26	CSD_FLAG_LOCK		= 0x01,
 27	CSD_FLAG_SYNCHRONOUS	= 0x02,
 28};
 29
 30struct call_function_data {
 31	call_single_data_t	__percpu *csd;
 32	cpumask_var_t		cpumask;
 33	cpumask_var_t		cpumask_ipi;
 34};
 35
 36static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 37
 38static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 39
 40static void flush_smp_call_function_queue(bool warn_cpu_offline);
 41
 42int smpcfd_prepare_cpu(unsigned int cpu)
 43{
 44	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 45
 46	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 47				     cpu_to_node(cpu)))
 48		return -ENOMEM;
 49	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 50				     cpu_to_node(cpu))) {
 51		free_cpumask_var(cfd->cpumask);
 52		return -ENOMEM;
 53	}
 54	cfd->csd = alloc_percpu(call_single_data_t);
 55	if (!cfd->csd) {
 56		free_cpumask_var(cfd->cpumask);
 57		free_cpumask_var(cfd->cpumask_ipi);
 58		return -ENOMEM;
 59	}
 60
 61	return 0;
 62}
 63
 64int smpcfd_dead_cpu(unsigned int cpu)
 65{
 66	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 67
 68	free_cpumask_var(cfd->cpumask);
 69	free_cpumask_var(cfd->cpumask_ipi);
 70	free_percpu(cfd->csd);
 71	return 0;
 72}
 73
 74int smpcfd_dying_cpu(unsigned int cpu)
 75{
 76	/*
 77	 * The IPIs for the smp-call-function callbacks queued by other
 78	 * CPUs might arrive late, either due to hardware latencies or
 79	 * because this CPU disabled interrupts (inside stop-machine)
 80	 * before the IPIs were sent. So flush out any pending callbacks
 81	 * explicitly (without waiting for the IPIs to arrive), to
 82	 * ensure that the outgoing CPU doesn't go offline with work
 83	 * still pending.
 84	 */
 85	flush_smp_call_function_queue(false);
 86	return 0;
 87}
 88
 89void __init call_function_init(void)
 90{
 91	int i;
 92
 93	for_each_possible_cpu(i)
 94		init_llist_head(&per_cpu(call_single_queue, i));
 95
 96	smpcfd_prepare_cpu(smp_processor_id());
 97}
 98
 99/*
100 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
101 *
102 * For non-synchronous ipi calls the csd can still be in use by the
104 * previous function call. For multi-cpu calls it's even more interesting
104 * as we'll have to ensure no other cpu is observing our csd.
105 */
106static __always_inline void csd_lock_wait(call_single_data_t *csd)
107{
108	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
109}
110
111static __always_inline void csd_lock(call_single_data_t *csd)
112{
113	csd_lock_wait(csd);
114	csd->flags |= CSD_FLAG_LOCK;
115
116	/*
117	 * prevent CPU from reordering the above assignment
118	 * to ->flags with any subsequent assignments to other
119	 * fields of the specified call_single_data_t structure:
120	 */
121	smp_wmb();
122}
123
124static __always_inline void csd_unlock(call_single_data_t *csd)
125{
126	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
127
128	/*
129	 * ensure we're all done before releasing data:
130	 */
131	smp_store_release(&csd->flags, 0);
132}
133
134static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
135
136/*
137 * Insert a previously allocated call_single_data_t element
138 * for execution on the given CPU. data must already have
139 * ->func, ->info, and ->flags set.
140 */
141static int generic_exec_single(int cpu, call_single_data_t *csd,
142			       smp_call_func_t func, void *info)
143{
144	if (cpu == smp_processor_id()) {
145		unsigned long flags;
146
147		/*
148		 * We can unlock early even for the synchronous on-stack case,
149		 * since we're doing this from the same CPU..
150		 */
151		csd_unlock(csd);
152		local_irq_save(flags);
153		func(info);
154		local_irq_restore(flags);
155		return 0;
156	}
157
158
159	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
160		csd_unlock(csd);
161		return -ENXIO;
162	}
163
164	csd->func = func;
165	csd->info = info;
166
167	/*
168	 * The list addition should be visible before sending the IPI
169	 * handler locks the list to pull the entry off it because of
170	 * normal cache coherency rules implied by spinlocks.
171	 *
172	 * If IPIs can go out of order to the cache coherency protocol
173	 * in an architecture, sufficient synchronisation should be added
174	 * to arch code to make it appear to obey cache coherency WRT
175	 * locking and barrier primitives. Generic code isn't really
176	 * equipped to do the right thing...
177	 */
178	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
179		arch_send_call_function_single_ipi(cpu);
180
181	return 0;
182}
183
184/**
185 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
186 *
187 * Invoked by arch to handle an IPI for call function single.
188 * Must be called with interrupts disabled.
189 */
190void generic_smp_call_function_single_interrupt(void)
191{
192	flush_smp_call_function_queue(true);
193}
194
195/**
196 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
197 *
198 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
199 *		      offline CPU. Skip this check if set to 'false'.
200 *
201 * Flush any pending smp-call-function callbacks queued on this CPU. This is
202 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
203 * to ensure that all pending IPI callbacks are run before it goes completely
204 * offline.
205 *
206 * Loop through the call_single_queue and run all the queued callbacks.
207 * Must be called with interrupts disabled.
208 */
209static void flush_smp_call_function_queue(bool warn_cpu_offline)
210{
211	struct llist_head *head;
212	struct llist_node *entry;
213	call_single_data_t *csd, *csd_next;
214	static bool warned;
215
216	lockdep_assert_irqs_disabled();
217
218	head = this_cpu_ptr(&call_single_queue);
219	entry = llist_del_all(head);
220	entry = llist_reverse_order(entry);
221
222	/* There shouldn't be any pending callbacks on an offline CPU. */
223	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
224		     !warned && !llist_empty(head))) {
225		warned = true;
226		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
227
228		/*
229		 * We don't have to use the _safe() variant here
230		 * because we are not invoking the IPI handlers yet.
231		 */
232		llist_for_each_entry(csd, entry, llist)
233			pr_warn("IPI callback %pS sent to offline CPU\n",
234				csd->func);
235	}
236
237	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
238		smp_call_func_t func = csd->func;
239		void *info = csd->info;
240
241		/* Do we wait until *after* callback? */
242		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
243			func(info);
244			csd_unlock(csd);
245		} else {
246			csd_unlock(csd);
247			func(info);
248		}
249	}
250
251	/*
252	 * Handle irq works queued remotely by irq_work_queue_on().
253	 * Smp functions above are typically synchronous so they
254	 * better run first since some other CPUs may be busy waiting
255	 * for them.
256	 */
257	irq_work_run();
258}
259
260/*
261 * smp_call_function_single - Run a function on a specific CPU
262 * @func: The function to run. This must be fast and non-blocking.
263 * @info: An arbitrary pointer to pass to the function.
264 * @wait: If true, wait until function has completed on other CPUs.
265 *
266 * Returns 0 on success, else a negative status code.
267 */
268int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
269			     int wait)
270{
271	call_single_data_t *csd;
272	call_single_data_t csd_stack = {
273		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
274	};
275	int this_cpu;
276	int err;
277
278	/*
279	 * prevent preemption and reschedule on another processor,
280	 * as well as CPU removal
281	 */
282	this_cpu = get_cpu();
283
284	/*
285	 * Can deadlock when called with interrupts disabled.
286	 * We allow cpu's that are not yet online though, as no one else can
287	 * send smp call function interrupt to this cpu and as such deadlocks
288	 * can't happen.
289	 */
290	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
291		     && !oops_in_progress);
292
293	csd = &csd_stack;
294	if (!wait) {
295		csd = this_cpu_ptr(&csd_data);
296		csd_lock(csd);
297	}
298
299	err = generic_exec_single(cpu, csd, func, info);
300
301	if (wait)
302		csd_lock_wait(csd);
303
304	put_cpu();
305
306	return err;
307}
308EXPORT_SYMBOL(smp_call_function_single);
309
310/**
311 * smp_call_function_single_async(): Run an asynchronous function on a
312 * 			         specific CPU.
313 * @cpu: The CPU to run on.
314 * @csd: Pre-allocated and setup data structure
315 *
316 * Like smp_call_function_single(), but the call is asynchronous and
317 * can thus be done from contexts with disabled interrupts.
318 *
319 * The caller passes his own pre-allocated data structure
320 * (ie: embedded in an object) and is responsible for synchronizing it
321 * such that the IPIs performed on the @csd are strictly serialized.
322 *
323 * NOTE: Be careful, there is unfortunately no current debugging facility to
324 * validate the correctness of this serialization.
325 */
326int smp_call_function_single_async(int cpu, call_single_data_t *csd)
327{
328	int err = 0;
329
330	preempt_disable();
331
332	/* We could deadlock if we have to wait here with interrupts disabled! */
333	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
334		csd_lock_wait(csd);
335
336	csd->flags = CSD_FLAG_LOCK;
337	smp_wmb();
338
339	err = generic_exec_single(cpu, csd, csd->func, csd->info);
340	preempt_enable();
341
342	return err;
343}
344EXPORT_SYMBOL_GPL(smp_call_function_single_async);
345
346/*
347 * smp_call_function_any - Run a function on any of the given cpus
348 * @mask: The mask of cpus it can run on.
349 * @func: The function to run. This must be fast and non-blocking.
350 * @info: An arbitrary pointer to pass to the function.
351 * @wait: If true, wait until function has completed.
352 *
353 * Returns 0 on success, else a negative status code (if no cpus were online).
354 *
355 * Selection preference:
356 *	1) current cpu if in @mask
357 *	2) any cpu of current node if in @mask
358 *	3) any other online cpu in @mask
359 */
360int smp_call_function_any(const struct cpumask *mask,
361			  smp_call_func_t func, void *info, int wait)
362{
363	unsigned int cpu;
364	const struct cpumask *nodemask;
365	int ret;
366
367	/* Try for same CPU (cheapest) */
368	cpu = get_cpu();
369	if (cpumask_test_cpu(cpu, mask))
370		goto call;
371
372	/* Try for same node. */
373	nodemask = cpumask_of_node(cpu_to_node(cpu));
374	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
375	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
376		if (cpu_online(cpu))
377			goto call;
378	}
379
380	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
381	cpu = cpumask_any_and(mask, cpu_online_mask);
382call:
383	ret = smp_call_function_single(cpu, func, info, wait);
384	put_cpu();
385	return ret;
386}
387EXPORT_SYMBOL_GPL(smp_call_function_any);
388
389/**
390 * smp_call_function_many(): Run a function on a set of other CPUs.
391 * @mask: The set of cpus to run on (only runs on online subset).
392 * @func: The function to run. This must be fast and non-blocking.
393 * @info: An arbitrary pointer to pass to the function.
394 * @wait: If true, wait (atomically) until function has completed
395 *        on other CPUs.
396 *
397 * If @wait is true, then returns once @func has returned.
398 *
399 * You must not call this function with disabled interrupts or from a
400 * hardware interrupt handler or from a bottom half handler. Preemption
401 * must be disabled when calling this function.
402 */
403void smp_call_function_many(const struct cpumask *mask,
404			    smp_call_func_t func, void *info, bool wait)
405{
406	struct call_function_data *cfd;
407	int cpu, next_cpu, this_cpu = smp_processor_id();
408
409	/*
410	 * Can deadlock when called with interrupts disabled.
411	 * We allow cpu's that are not yet online though, as no one else can
412	 * send smp call function interrupt to this cpu and as such deadlocks
413	 * can't happen.
414	 */
415	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
416		     && !oops_in_progress && !early_boot_irqs_disabled);
417
418	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
419	cpu = cpumask_first_and(mask, cpu_online_mask);
420	if (cpu == this_cpu)
421		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
422
423	/* No online cpus?  We're done. */
424	if (cpu >= nr_cpu_ids)
425		return;
426
427	/* Do we have another CPU which isn't us? */
428	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
429	if (next_cpu == this_cpu)
430		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
431
432	/* Fastpath: do that cpu by itself. */
433	if (next_cpu >= nr_cpu_ids) {
434		smp_call_function_single(cpu, func, info, wait);
435		return;
436	}
437
438	cfd = this_cpu_ptr(&cfd_data);
439
440	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
441	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
442
443	/* Some callers race with other cpus changing the passed mask */
444	if (unlikely(!cpumask_weight(cfd->cpumask)))
445		return;
446
447	cpumask_clear(cfd->cpumask_ipi);
448	for_each_cpu(cpu, cfd->cpumask) {
449		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
450
451		csd_lock(csd);
452		if (wait)
453			csd->flags |= CSD_FLAG_SYNCHRONOUS;
454		csd->func = func;
455		csd->info = info;
456		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
457			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
458	}
459
460	/* Send a message to all CPUs in the map */
461	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
462
463	if (wait) {
464		for_each_cpu(cpu, cfd->cpumask) {
465			call_single_data_t *csd;
466
467			csd = per_cpu_ptr(cfd->csd, cpu);
468			csd_lock_wait(csd);
469		}
470	}
471}
472EXPORT_SYMBOL(smp_call_function_many);
473
474/**
475 * smp_call_function(): Run a function on all other CPUs.
476 * @func: The function to run. This must be fast and non-blocking.
477 * @info: An arbitrary pointer to pass to the function.
478 * @wait: If true, wait (atomically) until function has completed
479 *        on other CPUs.
480 *
481 * Returns 0.
482 *
483 * If @wait is true, then returns once @func has returned; otherwise
484 * it returns just before the target cpu calls @func.
485 *
486 * You must not call this function with disabled interrupts or from a
487 * hardware interrupt handler or from a bottom half handler.
488 */
489int smp_call_function(smp_call_func_t func, void *info, int wait)
490{
491	preempt_disable();
492	smp_call_function_many(cpu_online_mask, func, info, wait);
493	preempt_enable();
494
495	return 0;
496}
497EXPORT_SYMBOL(smp_call_function);
498
499/* Setup configured maximum number of CPUs to activate */
500unsigned int setup_max_cpus = NR_CPUS;
501EXPORT_SYMBOL(setup_max_cpus);
502
503
504/*
505 * Setup routine for controlling SMP activation
506 *
507 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
508 * activation entirely (the MPS table probe still happens, though).
509 *
510 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
511 * greater than 0, limits the maximum number of CPUs activated in
512 * SMP mode to <NUM>.
513 */
514
515void __weak arch_disable_smp_support(void) { }
516
517static int __init nosmp(char *str)
518{
519	setup_max_cpus = 0;
520	arch_disable_smp_support();
521
522	return 0;
523}
524
525early_param("nosmp", nosmp);
526
527/* this is a hard limit */
528static int __init nrcpus(char *str)
529{
530	int nr_cpus;
531
532	get_option(&str, &nr_cpus);
533	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
534		nr_cpu_ids = nr_cpus;
535
536	return 0;
537}
538
539early_param("nr_cpus", nrcpus);
540
541static int __init maxcpus(char *str)
542{
543	get_option(&str, &setup_max_cpus);
544	if (setup_max_cpus == 0)
545		arch_disable_smp_support();
546
547	return 0;
548}
549
550early_param("maxcpus", maxcpus);
551
552/* Setup number of possible processor ids */
553unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
554EXPORT_SYMBOL(nr_cpu_ids);
555
556/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
557void __init setup_nr_cpu_ids(void)
558{
559	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
560}
561
562/* Called by boot processor to activate the rest. */
563void __init smp_init(void)
564{
565	int num_nodes, num_cpus;
566	unsigned int cpu;
567
568	idle_threads_init();
569	cpuhp_threads_init();
570
571	pr_info("Bringing up secondary CPUs ...\n");
572
573	/* FIXME: This should be done in userspace --RR */
574	for_each_present_cpu(cpu) {
575		if (num_online_cpus() >= setup_max_cpus)
576			break;
577		if (!cpu_online(cpu))
578			cpu_up(cpu);
579	}
580
581	num_nodes = num_online_nodes();
582	num_cpus  = num_online_cpus();
583	pr_info("Brought up %d node%s, %d CPU%s\n",
584		num_nodes, (num_nodes > 1 ? "s" : ""),
585		num_cpus,  (num_cpus  > 1 ? "s" : ""));
586
587	/* Any cleanup work */
588	smp_cpus_done(setup_max_cpus);
589}
590
591/*
592 * Call a function on all processors.  May be used during early boot while
593 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
594 * of local_irq_disable/enable().
595 */
596int on_each_cpu(void (*func) (void *info), void *info, int wait)
597{
598	unsigned long flags;
599	int ret = 0;
600
601	preempt_disable();
602	ret = smp_call_function(func, info, wait);
603	local_irq_save(flags);
604	func(info);
605	local_irq_restore(flags);
606	preempt_enable();
607	return ret;
608}
609EXPORT_SYMBOL(on_each_cpu);
610
611/**
612 * on_each_cpu_mask(): Run a function on processors specified by
613 * cpumask, which may include the local processor.
614 * @mask: The set of cpus to run on (only runs on online subset).
615 * @func: The function to run. This must be fast and non-blocking.
616 * @info: An arbitrary pointer to pass to the function.
617 * @wait: If true, wait (atomically) until function has completed
618 *        on other CPUs.
619 *
620 * If @wait is true, then returns once @func has returned.
621 *
622 * You must not call this function with disabled interrupts or from a
623 * hardware interrupt handler or from a bottom half handler.  The
624 * exception is that it may be used during early boot while
625 * early_boot_irqs_disabled is set.
626 */
627void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
628			void *info, bool wait)
629{
630	int cpu = get_cpu();
631
632	smp_call_function_many(mask, func, info, wait);
633	if (cpumask_test_cpu(cpu, mask)) {
634		unsigned long flags;
635		local_irq_save(flags);
636		func(info);
637		local_irq_restore(flags);
638	}
639	put_cpu();
640}
641EXPORT_SYMBOL(on_each_cpu_mask);
642
643/*
644 * on_each_cpu_cond(): Call a function on each processor for which
645 * the supplied function cond_func returns true, optionally waiting
646 * for all the required CPUs to finish. This may include the local
647 * processor.
648 * @cond_func:	A callback function that is passed a cpu id and
649 *		the info parameter. The function is called
650 *		with preemption disabled. The function should
651 *		return a boolean value indicating whether to IPI
652 *		the specified CPU.
653 * @func:	The function to run on all applicable CPUs.
654 *		This must be fast and non-blocking.
655 * @info:	An arbitrary pointer to pass to both functions.
656 * @wait:	If true, wait (atomically) until function has
657 *		completed on other CPUs.
658 * @gfp_flags:	GFP flags to use when allocating the cpumask
659 *		used internally by the function.
660 *
661 * The function might sleep if the GFP flags indicate a non
662 * atomic allocation is allowed.
663 *
664 * Preemption is disabled to protect against CPUs going offline but not online.
665 * CPUs going online during the call will not be seen or sent an IPI.
666 *
667 * You must not call this function with disabled interrupts or
668 * from a hardware interrupt handler or from a bottom half handler.
669 */
670void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
671			smp_call_func_t func, void *info, bool wait,
672			gfp_t gfp_flags)
673{
674	cpumask_var_t cpus;
675	int cpu, ret;
676
677	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
678
679	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
680		preempt_disable();
681		for_each_online_cpu(cpu)
682			if (cond_func(cpu, info))
683				cpumask_set_cpu(cpu, cpus);
684		on_each_cpu_mask(cpus, func, info, wait);
685		preempt_enable();
686		free_cpumask_var(cpus);
687	} else {
688		/*
689		 * No free cpumask, bother. No matter, we'll
690		 * just have to IPI them one by one.
691		 */
692		preempt_disable();
693		for_each_online_cpu(cpu)
694			if (cond_func(cpu, info)) {
695				ret = smp_call_function_single(cpu, func,
696								info, wait);
697				WARN_ON_ONCE(ret);
698			}
699		preempt_enable();
700	}
701}
702EXPORT_SYMBOL(on_each_cpu_cond);
703
704static void do_nothing(void *unused)
705{
706}
707
708/**
709 * kick_all_cpus_sync - Force all cpus out of idle
710 *
711 * Used to synchronize the update of pm_idle function pointer. It's
712 * called after the pointer is updated and returns after the dummy
713 * callback function has been executed on all cpus. The execution of
714 * the function can only happen on the remote cpus after they have
715 * left the idle function which had been called via pm_idle function
716 * pointer. So it's guaranteed that nothing uses the previous pointer
717 * anymore.
718 */
719void kick_all_cpus_sync(void)
720{
721	/* Make sure the change is visible before we kick the cpus */
722	smp_mb();
723	smp_call_function(do_nothing, NULL, 1);
724}
725EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
726
727/**
728 * wake_up_all_idle_cpus - break all cpus out of idle
729 * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
730 * including CPUs that are busy-polling in idle; CPUs that are not idle
731 * are left alone.
732 */
733void wake_up_all_idle_cpus(void)
734{
735	int cpu;
736
737	preempt_disable();
738	for_each_online_cpu(cpu) {
739		if (cpu == smp_processor_id())
740			continue;
741
742		wake_up_if_idle(cpu);
743	}
744	preempt_enable();
745}
746EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
747
748/**
749 * smp_call_on_cpu - Call a function on a specific cpu
750 *
751 * Used to call a function on a specific cpu and wait for it to return.
752 * Optionally make sure the call is done on a specified physical cpu via vcpu
753 * pinning in order to support virtualized environments.
754 */
755struct smp_call_on_cpu_struct {
756	struct work_struct	work;
757	struct completion	done;
758	int			(*func)(void *);
759	void			*data;
760	int			ret;
761	int			cpu;
762};
763
764static void smp_call_on_cpu_callback(struct work_struct *work)
765{
766	struct smp_call_on_cpu_struct *sscs;
767
768	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
769	if (sscs->cpu >= 0)
770		hypervisor_pin_vcpu(sscs->cpu);
771	sscs->ret = sscs->func(sscs->data);
772	if (sscs->cpu >= 0)
773		hypervisor_pin_vcpu(-1);
774
775	complete(&sscs->done);
776}
777
778int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
779{
780	struct smp_call_on_cpu_struct sscs = {
781		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
782		.func = func,
783		.data = par,
784		.cpu  = phys ? cpu : -1,
785	};
786
787	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
788
789	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
790		return -ENXIO;
791
792	queue_work_on(cpu, system_wq, &sscs.work);
793	wait_for_completion(&sscs.done);
794
795	return sscs.ret;
796}
797EXPORT_SYMBOL_GPL(smp_call_on_cpu);