v3.15

/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_WAIT		= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;
#endif
	};

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info, int wait)
{
	struct call_single_data csd_stack = { .flags = 0 };
	unsigned long flags;

	if (cpu == smp_processor_id()) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	if (!csd) {
		csd = &csd_stack;
		if (!wait)
			csd = &__get_cpu_var(csd_data);
	}

	csd_lock(csd);

	csd->func = func;
	csd->info = info;

	if (wait)
		csd->flags |= CSD_FLAG_WAIT;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);

	return 0;
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	entry = llist_del_all(&__get_cpu_var(call_single_queue));
	entry = llist_reverse_order(entry);

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		csd->func(csd->info);
		csd_unlock(csd);
	}
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	err = generic_exec_single(cpu, NULL, func, info, wait);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
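
/*
 * Editor's note: a minimal usage sketch, not part of kernel/smp.c. The
 * callback and the chosen CPU number are hypothetical; the callback runs
 * in hardirq context on the target CPU and must not sleep.
 */
#if 0	/* illustration only */
static void read_cpu_id(void *info)
{
	int *out = info;		/* runs on the target CPU */

	*out = smp_processor_id();
}

static int example_query_cpu1(void)
{
	int id = -1;

	/* wait == 1: returns only after read_cpu_id() ran on CPU 1 */
	return smp_call_function_single(1, read_cpu_id, &id, 1);
}
#endif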

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * 			         specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();
	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
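
/*
 * Editor's note: a sketch of the intended usage, not part of kernel/smp.c.
 * The csd is embedded in a caller-owned object (struct my_device and its
 * fields are hypothetical); the caller must not reissue the call until the
 * previous one has completed.
 */
#if 0	/* illustration only */
struct my_device {
	struct call_single_data	csd;	/* at most one call in flight */
	int			pending;
};

static void my_remote_work(void *info)
{
	struct my_device *dev = info;

	/* consume dev->pending on the target CPU, hardirq context */
}

static void my_kick_cpu(struct my_device *dev, int cpu)
{
	dev->csd.func = my_remote_work;
	dev->csd.info = dev;
	smp_call_function_single_async(cpu, &dev->csd);
}
#endif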

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
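
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. The callback is
 * hypothetical; note that the caller must disable preemption and that the
 * calling CPU is deliberately excluded by this helper.
 */
#if 0	/* illustration only */
static void invalidate_local(void *info)
{
	/* per-CPU work in hardirq context; must not sleep */
}

static void example_invalidate(const struct cpumask *mask)
{
	preempt_disable();	/* required by smp_call_function_many() */
	smp_call_function_many(mask, invalidate_local, NULL, true);
	preempt_enable();
}
#endif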

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
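
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. Unlike
 * smp_call_function(), on_each_cpu() also runs the callback on the
 * calling CPU. The per-CPU counter is hypothetical.
 */
#if 0	/* illustration only */
static DEFINE_PER_CPU(unsigned long, example_events);

static void bump_event_count(void *unused)
{
	this_cpu_inc(example_events);
}

static void example_bump_all(void)
{
	on_each_cpu(bump_event_count, NULL, 1);	/* wait on all CPUs */
}
#endif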

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
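
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. Only CPUs whose
 * (hypothetical) per-CPU flag is set get the IPI; GFP_KERNEL lets the
 * helper allocate its scratch cpumask, so the call may sleep.
 */
#if 0	/* illustration only */
static DEFINE_PER_CPU(bool, example_needs_flush);

static bool needs_flush(int cpu, void *info)
{
	return per_cpu(example_needs_flush, cpu);
}

static void do_flush(void *info)
{
	__this_cpu_write(example_needs_flush, false);
}

static void example_flush_where_needed(void)
{
	on_each_cpu_cond(needs_flush, do_flush, NULL, true, GFP_KERNEL);
}
#endif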

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
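
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. The handler
 * pointer is hypothetical; the barrier inside kick_all_cpus_sync() plus
 * the dummy IPI guarantee that once the call returns, no CPU can still be
 * running through the old pointer.
 */
#if 0	/* illustration only */
static void (*example_idle_hook)(void);

static void example_set_idle_hook(void (*hook)(void))
{
	example_idle_hook = hook;
	kick_all_cpus_sync();
}
#endif
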
v5.9

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			func(info);
			csd_unlock(csd);
		} else {
			prev = &csd->llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_unlock(csd);
				func(info);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() on because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * 			         specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
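
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. In this version
 * the helper itself detects a still-locked csd and returns -EBUSY, so a
 * caller can simply retry or drop the kick. Names are hypothetical.
 */
#if 0	/* illustration only */
static void poke(void *info)
{
	/* runs on the target CPU in hardirq context */
}

static call_single_data_t example_csd = { .func = poke };

static void example_try_poke(int cpu)
{
	if (smp_call_function_single_async(cpu, &example_csd) == -EBUSY)
		pr_debug("poke of CPU %d still in flight\n", cpu);
}
#endif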

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
476
477static void smp_call_function_many_cond(const struct cpumask *mask,
478					smp_call_func_t func, void *info,
479					bool wait, smp_cond_func_t cond_func)
 
 
 
 
 
 
 
 
 
 
 
 
 
480{
481	struct call_function_data *cfd;
482	int cpu, next_cpu, this_cpu = smp_processor_id();
483
484	/*
485	 * Can deadlock when called with interrupts disabled.
486	 * We allow cpu's that are not yet online though, as no one else can
487	 * send smp call function interrupt to this cpu and as such deadlocks
488	 * can't happen.
489	 */
490	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
491		     && !oops_in_progress && !early_boot_irqs_disabled);
492
493	/*
494	 * When @wait we can deadlock when we interrupt between llist_add() and
495	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
496	 * csd_lock() on because the interrupt context uses the same csd
497	 * storage.
498	 */
499	WARN_ON_ONCE(!in_task());
500
501	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
502	cpu = cpumask_first_and(mask, cpu_online_mask);
503	if (cpu == this_cpu)
504		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
505
506	/* No online cpus?  We're done. */
507	if (cpu >= nr_cpu_ids)
508		return;
509
510	/* Do we have another CPU which isn't us? */
511	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
512	if (next_cpu == this_cpu)
513		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
514
515	/* Fastpath: do that cpu by itself. */
516	if (next_cpu >= nr_cpu_ids) {
517		if (!cond_func || cond_func(cpu, info))
518			smp_call_function_single(cpu, func, info, wait);
519		return;
520	}
521
522	cfd = this_cpu_ptr(&cfd_data);
523
524	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
525	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
526
527	/* Some callers race with other cpus changing the passed mask */
528	if (unlikely(!cpumask_weight(cfd->cpumask)))
529		return;
530
531	cpumask_clear(cfd->cpumask_ipi);
532	for_each_cpu(cpu, cfd->cpumask) {
533		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
534
535		if (cond_func && !cond_func(cpu, info))
536			continue;
537
538		csd_lock(csd);
539		if (wait)
540			csd->flags |= CSD_TYPE_SYNC;
541		csd->func = func;
542		csd->info = info;
543		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
544			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
545	}
546
547	/* Send a message to all CPUs in the map */
548	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
549
550	if (wait) {
551		for_each_cpu(cpu, cfd->cpumask) {
552			call_single_data_t *csd;
553
554			csd = per_cpu_ptr(cfd->csd, cpu);
555			csd_lock_wait(csd);
556		}
557	}
558}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including idle-polling cpus; for non-idle cpus it does nothing.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
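
/*
 * Editor's note: a usage sketch, not part of kernel/smp.c. Unlike the
 * IPI-based helpers above, the callback runs from a workqueue worker
 * pinned to @cpu and therefore may sleep. read_slow_state() is
 * hypothetical.
 */
#if 0	/* illustration only */
static int read_slow_state(void *data)
{
	u32 *out = data;

	*out = 42;		/* may sleep: mutexes, I/O, etc. */
	return 0;
}

static int example_read_on(unsigned int cpu, u32 *value)
{
	/* phys == false: no vcpu pinning requested */
	return smp_call_on_cpu(cpu, read_slow_state, value, false);
}
#endif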