kernel/smp.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic helpers for smp ipi calls
  4 *
  5 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/irq_work.h>
 11#include <linux/rcupdate.h>
 12#include <linux/rculist.h>
 13#include <linux/kernel.h>
 14#include <linux/export.h>
 15#include <linux/percpu.h>
 16#include <linux/init.h>
 17#include <linux/gfp.h>
 18#include <linux/smp.h>
 19#include <linux/cpu.h>
 20#include <linux/sched.h>
 21#include <linux/sched/idle.h>
 22#include <linux/hypervisor.h>
 23
 24#include "smpboot.h"
 25
 26enum {
 27	CSD_FLAG_LOCK		= 0x01,
 28	CSD_FLAG_SYNCHRONOUS	= 0x02,
 29};
 30
 31struct call_function_data {
 32	call_single_data_t	__percpu *csd;
 33	cpumask_var_t		cpumask;
 34	cpumask_var_t		cpumask_ipi;
 35};
 36
 37static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 38
 39static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 40
 41static void flush_smp_call_function_queue(bool warn_cpu_offline);
 42
 43int smpcfd_prepare_cpu(unsigned int cpu)
 44{
 45	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 46
 47	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 48				     cpu_to_node(cpu)))
 49		return -ENOMEM;
 50	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 51				     cpu_to_node(cpu))) {
 52		free_cpumask_var(cfd->cpumask);
 53		return -ENOMEM;
 54	}
 55	cfd->csd = alloc_percpu(call_single_data_t);
 56	if (!cfd->csd) {
 57		free_cpumask_var(cfd->cpumask);
 58		free_cpumask_var(cfd->cpumask_ipi);
 59		return -ENOMEM;
 60	}
 61
 62	return 0;
 63}
 64
 65int smpcfd_dead_cpu(unsigned int cpu)
 66{
 67	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 68
 69	free_cpumask_var(cfd->cpumask);
 70	free_cpumask_var(cfd->cpumask_ipi);
 71	free_percpu(cfd->csd);
 72	return 0;
 73}
 74
 75int smpcfd_dying_cpu(unsigned int cpu)
 76{
 77	/*
 78	 * The IPIs for the smp-call-function callbacks queued by other
 79	 * CPUs might arrive late, either due to hardware latencies or
 80	 * because this CPU disabled interrupts (inside stop-machine)
 81	 * before the IPIs were sent. So flush out any pending callbacks
 82	 * explicitly (without waiting for the IPIs to arrive), to
 83	 * ensure that the outgoing CPU doesn't go offline with work
 84	 * still pending.
 85	 */
 86	flush_smp_call_function_queue(false);
 87	return 0;
 88}
 89
 90void __init call_function_init(void)
 91{
 92	int i;
 93
 94	for_each_possible_cpu(i)
 95		init_llist_head(&per_cpu(call_single_queue, i));
 96
 97	smpcfd_prepare_cpu(smp_processor_id());
 98}
 99
100/*
 101 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 102 *
 103 * For non-synchronous ipi calls the csd can still be in use by the
 104 * previous function call. For multi-cpu calls it's even more interesting
 105 * as we'll have to ensure no other cpu is observing our csd.
106 */
107static __always_inline void csd_lock_wait(call_single_data_t *csd)
108{
109	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
110}
111
112static __always_inline void csd_lock(call_single_data_t *csd)
113{
114	csd_lock_wait(csd);
115	csd->flags |= CSD_FLAG_LOCK;
116
117	/*
118	 * prevent CPU from reordering the above assignment
119	 * to ->flags with any subsequent assignments to other
120	 * fields of the specified call_single_data_t structure:
121	 */
122	smp_wmb();
123}
124
125static __always_inline void csd_unlock(call_single_data_t *csd)
126{
127	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
128
129	/*
130	 * ensure we're all done before releasing data:
131	 */
132	smp_store_release(&csd->flags, 0);
133}
134
135static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
136
137/*
138 * Insert a previously allocated call_single_data_t element
139 * for execution on the given CPU. data must already have
140 * ->func, ->info, and ->flags set.
141 */
142static int generic_exec_single(int cpu, call_single_data_t *csd,
143			       smp_call_func_t func, void *info)
144{
145	if (cpu == smp_processor_id()) {
146		unsigned long flags;
147
148		/*
149		 * We can unlock early even for the synchronous on-stack case,
 150		 * since we're doing this from the same CPU.
151		 */
152		csd_unlock(csd);
153		local_irq_save(flags);
154		func(info);
155		local_irq_restore(flags);
156		return 0;
157	}
158
159
160	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
161		csd_unlock(csd);
162		return -ENXIO;
163	}
164
165	csd->func = func;
166	csd->info = info;
167
168	/*
169	 * The list addition should be visible before sending the IPI
170	 * handler locks the list to pull the entry off it because of
171	 * normal cache coherency rules implied by spinlocks.
172	 *
 173	 * If IPIs can be reordered with respect to the cache coherency protocol
 174	 * on an architecture, sufficient synchronisation should be added
175	 * to arch code to make it appear to obey cache coherency WRT
176	 * locking and barrier primitives. Generic code isn't really
177	 * equipped to do the right thing...
178	 */
179	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
180		arch_send_call_function_single_ipi(cpu);
181
182	return 0;
183}
184
185/**
186 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
187 *
188 * Invoked by arch to handle an IPI for call function single.
189 * Must be called with interrupts disabled.
190 */
191void generic_smp_call_function_single_interrupt(void)
192{
193	flush_smp_call_function_queue(true);
194}
195
196/**
197 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
198 *
199 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
200 *		      offline CPU. Skip this check if set to 'false'.
201 *
202 * Flush any pending smp-call-function callbacks queued on this CPU. This is
203 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
204 * to ensure that all pending IPI callbacks are run before it goes completely
205 * offline.
206 *
207 * Loop through the call_single_queue and run all the queued callbacks.
208 * Must be called with interrupts disabled.
209 */
210static void flush_smp_call_function_queue(bool warn_cpu_offline)
211{
212	struct llist_head *head;
213	struct llist_node *entry;
214	call_single_data_t *csd, *csd_next;
215	static bool warned;
216
217	lockdep_assert_irqs_disabled();
218
219	head = this_cpu_ptr(&call_single_queue);
220	entry = llist_del_all(head);
221	entry = llist_reverse_order(entry);
222
223	/* There shouldn't be any pending callbacks on an offline CPU. */
224	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
225		     !warned && !llist_empty(head))) {
226		warned = true;
227		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
228
229		/*
230		 * We don't have to use the _safe() variant here
231		 * because we are not invoking the IPI handlers yet.
232		 */
233		llist_for_each_entry(csd, entry, llist)
234			pr_warn("IPI callback %pS sent to offline CPU\n",
235				csd->func);
236	}
237
238	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
239		smp_call_func_t func = csd->func;
240		void *info = csd->info;
241
242		/* Do we wait until *after* callback? */
243		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
244			func(info);
245			csd_unlock(csd);
246		} else {
247			csd_unlock(csd);
248			func(info);
249		}
250	}
251
252	/*
253	 * Handle irq works queued remotely by irq_work_queue_on().
254	 * Smp functions above are typically synchronous so they
255	 * better run first since some other CPUs may be busy waiting
256	 * for them.
257	 */
258	irq_work_run();
259}
260
261/*
262 * smp_call_function_single - Run a function on a specific CPU
263 * @func: The function to run. This must be fast and non-blocking.
264 * @info: An arbitrary pointer to pass to the function.
 265 * @wait: If true, wait until the function has completed on the target CPU.
266 *
267 * Returns 0 on success, else a negative status code.
268 */
269int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
270			     int wait)
271{
272	call_single_data_t *csd;
273	call_single_data_t csd_stack = {
274		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
275	};
276	int this_cpu;
277	int err;
278
279	/*
280	 * prevent preemption and reschedule on another processor,
281	 * as well as CPU removal
282	 */
283	this_cpu = get_cpu();
284
285	/*
286	 * Can deadlock when called with interrupts disabled.
 287	 * We allow cpus that are not yet online though, as no one else can
288	 * send smp call function interrupt to this cpu and as such deadlocks
289	 * can't happen.
290	 */
291	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
292		     && !oops_in_progress);
293
294	/*
295	 * When @wait we can deadlock when we interrupt between llist_add() and
296	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 297	 * csd_lock() because the interrupt context uses the same csd
298	 * storage.
299	 */
300	WARN_ON_ONCE(!in_task());
301
302	csd = &csd_stack;
303	if (!wait) {
304		csd = this_cpu_ptr(&csd_data);
305		csd_lock(csd);
306	}
307
308	err = generic_exec_single(cpu, csd, func, info);
309
310	if (wait)
311		csd_lock_wait(csd);
312
313	put_cpu();
314
315	return err;
316}
317EXPORT_SYMBOL(smp_call_function_single);
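
/*
 * Example (not part of kernel/smp.c): a minimal usage sketch for
 * smp_call_function_single() from process context. The callback and wrapper
 * names below are hypothetical.
 */
static void remote_cpu_id(void *info)
{
	/* runs on the target CPU with interrupts disabled */
	*(int *)info = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	int id = -1;
	int err;

	/* wait=1: do not return until remote_cpu_id() has run on @cpu */
	err = smp_call_function_single(cpu, remote_cpu_id, &id, 1);
	if (err)
		return err;	/* e.g. -ENXIO if @cpu is invalid or offline */

	return id;
}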
318
319/**
320 * smp_call_function_single_async(): Run an asynchronous function on a
321 * 			         specific CPU.
322 * @cpu: The CPU to run on.
323 * @csd: Pre-allocated and setup data structure
324 *
 325 * Like smp_call_function_single(), but the call is asynchronous and
326 * can thus be done from contexts with disabled interrupts.
327 *
328 * The caller passes his own pre-allocated data structure
329 * (ie: embedded in an object) and is responsible for synchronizing it
330 * such that the IPIs performed on the @csd are strictly serialized.
331 *
332 * NOTE: Be careful, there is unfortunately no current debugging facility to
333 * validate the correctness of this serialization.
334 */
335int smp_call_function_single_async(int cpu, call_single_data_t *csd)
336{
337	int err = 0;
338
339	preempt_disable();
340
341	/* We could deadlock if we have to wait here with interrupts disabled! */
342	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
343		csd_lock_wait(csd);
344
345	csd->flags = CSD_FLAG_LOCK;
346	smp_wmb();
347
348	err = generic_exec_single(cpu, csd, csd->func, csd->info);
349	preempt_enable();
350
351	return err;
352}
353EXPORT_SYMBOL_GPL(smp_call_function_single_async);
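
/*
 * Example (not part of kernel/smp.c): a sketch of the async pattern, with a
 * hypothetical driver structure that embeds its own csd. The csd must not be
 * reused until the previous call has completed (CSD_FLAG_LOCK released).
 */
struct ex_dev {
	call_single_data_t csd;		/* assumed zeroed at allocation */
	int pending;
};

static void ex_dev_poke(void *info)
{
	struct ex_dev *dev = info;

	/* runs on the remote CPU in IPI context */
	dev->pending = 0;
}

static int ex_dev_kick(struct ex_dev *dev, int cpu)
{
	dev->csd.func = ex_dev_poke;
	dev->csd.info = dev;

	/* does not wait; safe to call with interrupts disabled */
	return smp_call_function_single_async(cpu, &dev->csd);
}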
354
355/*
356 * smp_call_function_any - Run a function on any of the given cpus
357 * @mask: The mask of cpus it can run on.
358 * @func: The function to run. This must be fast and non-blocking.
359 * @info: An arbitrary pointer to pass to the function.
360 * @wait: If true, wait until function has completed.
361 *
362 * Returns 0 on success, else a negative status code (if no cpus were online).
363 *
364 * Selection preference:
365 *	1) current cpu if in @mask
366 *	2) any cpu of current node if in @mask
367 *	3) any other online cpu in @mask
368 */
369int smp_call_function_any(const struct cpumask *mask,
370			  smp_call_func_t func, void *info, int wait)
371{
372	unsigned int cpu;
373	const struct cpumask *nodemask;
374	int ret;
375
376	/* Try for same CPU (cheapest) */
377	cpu = get_cpu();
378	if (cpumask_test_cpu(cpu, mask))
379		goto call;
380
381	/* Try for same node. */
382	nodemask = cpumask_of_node(cpu_to_node(cpu));
383	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
384	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
385		if (cpu_online(cpu))
386			goto call;
387	}
388
389	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
390	cpu = cpumask_any_and(mask, cpu_online_mask);
391call:
392	ret = smp_call_function_single(cpu, func, info, wait);
393	put_cpu();
394	return ret;
395}
396EXPORT_SYMBOL_GPL(smp_call_function_any);
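
/*
 * Example (not part of kernel/smp.c): illustrative caller of
 * smp_call_function_any(); the helper picks the cheapest CPU in @mask for us
 * (current CPU, then same node, then any online CPU). Names are hypothetical.
 */
static void ex_sample_cpu(void *info)
{
	*(int *)info = smp_processor_id();
}

static int example_sample_near(const struct cpumask *mask)
{
	int where = -1;
	int err;

	err = smp_call_function_any(mask, ex_sample_cpu, &where, 1);
	return err ? err : where;	/* -ENXIO if no CPU in @mask is online */
}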
397
398/**
399 * smp_call_function_many(): Run a function on a set of other CPUs.
400 * @mask: The set of cpus to run on (only runs on online subset).
401 * @func: The function to run. This must be fast and non-blocking.
402 * @info: An arbitrary pointer to pass to the function.
403 * @wait: If true, wait (atomically) until function has completed
404 *        on other CPUs.
405 *
406 * If @wait is true, then returns once @func has returned.
407 *
408 * You must not call this function with disabled interrupts or from a
409 * hardware interrupt handler or from a bottom half handler. Preemption
410 * must be disabled when calling this function.
411 */
412void smp_call_function_many(const struct cpumask *mask,
413			    smp_call_func_t func, void *info, bool wait)
414{
415	struct call_function_data *cfd;
416	int cpu, next_cpu, this_cpu = smp_processor_id();
417
418	/*
419	 * Can deadlock when called with interrupts disabled.
 420	 * We allow cpus that are not yet online though, as no one else can
421	 * send smp call function interrupt to this cpu and as such deadlocks
422	 * can't happen.
423	 */
424	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
425		     && !oops_in_progress && !early_boot_irqs_disabled);
426
427	/*
428	 * When @wait we can deadlock when we interrupt between llist_add() and
429	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 430	 * csd_lock() because the interrupt context uses the same csd
431	 * storage.
432	 */
433	WARN_ON_ONCE(!in_task());
434
435	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
436	cpu = cpumask_first_and(mask, cpu_online_mask);
437	if (cpu == this_cpu)
438		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
439
440	/* No online cpus?  We're done. */
441	if (cpu >= nr_cpu_ids)
442		return;
443
444	/* Do we have another CPU which isn't us? */
445	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
446	if (next_cpu == this_cpu)
447		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
448
449	/* Fastpath: do that cpu by itself. */
450	if (next_cpu >= nr_cpu_ids) {
451		smp_call_function_single(cpu, func, info, wait);
452		return;
453	}
454
455	cfd = this_cpu_ptr(&cfd_data);
456
457	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
458	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
459
460	/* Some callers race with other cpus changing the passed mask */
461	if (unlikely(!cpumask_weight(cfd->cpumask)))
462		return;
463
464	cpumask_clear(cfd->cpumask_ipi);
465	for_each_cpu(cpu, cfd->cpumask) {
466		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
467
468		csd_lock(csd);
469		if (wait)
470			csd->flags |= CSD_FLAG_SYNCHRONOUS;
471		csd->func = func;
472		csd->info = info;
473		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
474			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
475	}
476
477	/* Send a message to all CPUs in the map */
478	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
479
480	if (wait) {
481		for_each_cpu(cpu, cfd->cpumask) {
482			call_single_data_t *csd;
483
484			csd = per_cpu_ptr(cfd->csd, cpu);
485			csd_lock_wait(csd);
486		}
487	}
488}
489EXPORT_SYMBOL(smp_call_function_many);
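
/*
 * Example (not part of kernel/smp.c): a sketch of driving
 * smp_call_function_many() directly. Preemption must be disabled by the
 * caller, and the function skips the calling CPU, so the local invocation is
 * done by hand; this mirrors what on_each_cpu_mask() below does for you.
 * The callback name is hypothetical.
 */
static void ex_sync_state(void *info)
{
	/* must be fast and non-blocking; runs with interrupts disabled */
}

static void example_sync_mask(const struct cpumask *mask)
{
	int cpu = get_cpu();		/* disable preemption */

	smp_call_function_many(mask, ex_sync_state, NULL, true);
	if (cpumask_test_cpu(cpu, mask))
		ex_sync_state(NULL);	/* @mask may include this CPU */
	put_cpu();
}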
490
491/**
492 * smp_call_function(): Run a function on all other CPUs.
493 * @func: The function to run. This must be fast and non-blocking.
494 * @info: An arbitrary pointer to pass to the function.
495 * @wait: If true, wait (atomically) until function has completed
496 *        on other CPUs.
 497 *
500 * If @wait is true, then returns once @func has returned; otherwise
501 * it returns just before the target cpu calls @func.
502 *
503 * You must not call this function with disabled interrupts or from a
504 * hardware interrupt handler or from a bottom half handler.
505 */
506void smp_call_function(smp_call_func_t func, void *info, int wait)
507{
508	preempt_disable();
509	smp_call_function_many(cpu_online_mask, func, info, wait);
510	preempt_enable();
511}
512EXPORT_SYMBOL(smp_call_function);
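
/*
 * Example (not part of kernel/smp.c): smp_call_function() reaches every
 * online CPU except the caller, so pair it with a local call when the work
 * must happen everywhere; on_each_cpu() below wraps exactly this pattern.
 * The callback name is hypothetical.
 */
static void ex_reset_counters(void *unused)
{
}

static void example_reset_everywhere(void)
{
	preempt_disable();
	smp_call_function(ex_reset_counters, NULL, 1);	/* all other CPUs, wait */
	ex_reset_counters(NULL);			/* and this CPU */
	preempt_enable();
}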
513
514/* Setup configured maximum number of CPUs to activate */
515unsigned int setup_max_cpus = NR_CPUS;
516EXPORT_SYMBOL(setup_max_cpus);
517
518
519/*
520 * Setup routine for controlling SMP activation
521 *
522 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
523 * activation entirely (the MPS table probe still happens, though).
524 *
525 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
526 * greater than 0, limits the maximum number of CPUs activated in
527 * SMP mode to <NUM>.
528 */
529
530void __weak arch_disable_smp_support(void) { }
531
532static int __init nosmp(char *str)
533{
534	setup_max_cpus = 0;
535	arch_disable_smp_support();
536
537	return 0;
538}
539
540early_param("nosmp", nosmp);
541
 542/* this is a hard limit */
543static int __init nrcpus(char *str)
544{
545	int nr_cpus;
546
547	get_option(&str, &nr_cpus);
548	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
549		nr_cpu_ids = nr_cpus;
550
551	return 0;
552}
553
554early_param("nr_cpus", nrcpus);
555
556static int __init maxcpus(char *str)
557{
558	get_option(&str, &setup_max_cpus);
559	if (setup_max_cpus == 0)
560		arch_disable_smp_support();
561
562	return 0;
563}
564
565early_param("maxcpus", maxcpus);
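
/*
 * Example (not part of kernel/smp.c): illustrative kernel command lines for
 * the parameters handled above.
 *
 *   nosmp        - boot with the boot CPU only; SMP support is disabled
 *   maxcpus=4    - bring up at most 4 CPUs during boot (further present CPUs
 *                  can still be onlined later through sysfs)
 *   nr_cpus=8    - hard-cap nr_cpu_ids, so at most 8 CPUs are ever possible
 */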
566
567/* Setup number of possible processor ids */
568unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
569EXPORT_SYMBOL(nr_cpu_ids);
570
571/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
572void __init setup_nr_cpu_ids(void)
573{
 574	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
575}
576
577/* Called by boot processor to activate the rest. */
578void __init smp_init(void)
579{
580	int num_nodes, num_cpus;
581	unsigned int cpu;
582
583	idle_threads_init();
584	cpuhp_threads_init();
585
586	pr_info("Bringing up secondary CPUs ...\n");
587
588	/* FIXME: This should be done in userspace --RR */
589	for_each_present_cpu(cpu) {
590		if (num_online_cpus() >= setup_max_cpus)
591			break;
592		if (!cpu_online(cpu))
593			cpu_up(cpu);
594	}
595
596	num_nodes = num_online_nodes();
597	num_cpus  = num_online_cpus();
598	pr_info("Brought up %d node%s, %d CPU%s\n",
599		num_nodes, (num_nodes > 1 ? "s" : ""),
600		num_cpus,  (num_cpus  > 1 ? "s" : ""));
601
602	/* Any cleanup work */
603	smp_cpus_done(setup_max_cpus);
604}
605
606/*
607 * Call a function on all processors.  May be used during early boot while
608 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
609 * of local_irq_disable/enable().
610 */
611void on_each_cpu(void (*func) (void *info), void *info, int wait)
612{
613	unsigned long flags;
614
615	preempt_disable();
616	smp_call_function(func, info, wait);
617	local_irq_save(flags);
618	func(info);
619	local_irq_restore(flags);
620	preempt_enable();
621}
622EXPORT_SYMBOL(on_each_cpu);
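
/*
 * Example (not part of kernel/smp.c): a minimal sketch that clears a
 * hypothetical per-CPU counter on every online CPU, including the caller.
 */
static DEFINE_PER_CPU(unsigned long, ex_pending);

static void ex_clear_pending(void *unused)
{
	this_cpu_write(ex_pending, 0);
}

static void example_clear_all_pending(void)
{
	/* wait=1: returns only after ex_clear_pending() ran everywhere */
	on_each_cpu(ex_clear_pending, NULL, 1);
}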
623
624/**
625 * on_each_cpu_mask(): Run a function on processors specified by
626 * cpumask, which may include the local processor.
627 * @mask: The set of cpus to run on (only runs on online subset).
628 * @func: The function to run. This must be fast and non-blocking.
629 * @info: An arbitrary pointer to pass to the function.
630 * @wait: If true, wait (atomically) until function has completed
631 *        on other CPUs.
632 *
633 * If @wait is true, then returns once @func has returned.
634 *
635 * You must not call this function with disabled interrupts or from a
636 * hardware interrupt handler or from a bottom half handler.  The
637 * exception is that it may be used during early boot while
638 * early_boot_irqs_disabled is set.
639 */
640void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
641			void *info, bool wait)
642{
643	int cpu = get_cpu();
644
645	smp_call_function_many(mask, func, info, wait);
646	if (cpumask_test_cpu(cpu, mask)) {
647		unsigned long flags;
648		local_irq_save(flags);
649		func(info);
650		local_irq_restore(flags);
651	}
652	put_cpu();
653}
654EXPORT_SYMBOL(on_each_cpu_mask);
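
/*
 * Example (not part of kernel/smp.c): running a fast callback on every online
 * CPU of one NUMA node. The node id handling and callback are hypothetical;
 * cpumask_of_node() is declared in <linux/topology.h>.
 */
static void ex_node_tick(void *unused)
{
}

static void example_tick_node(int nid)
{
	/* offline CPUs in the node's mask are skipped automatically */
	on_each_cpu_mask(cpumask_of_node(nid), ex_node_tick, NULL, true);
}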
655
656/*
 657 * on_each_cpu_cond_mask(): Call a function on each processor for which
658 * the supplied function cond_func returns true, optionally waiting
659 * for all the required CPUs to finish. This may include the local
660 * processor.
661 * @cond_func:	A callback function that is passed a cpu id and
 662 *		the info parameter. The function is called
 663 *		with preemption disabled. The function should
 664 *		return a boolean value indicating whether to IPI
665 *		the specified CPU.
666 * @func:	The function to run on all applicable CPUs.
667 *		This must be fast and non-blocking.
668 * @info:	An arbitrary pointer to pass to both functions.
669 * @wait:	If true, wait (atomically) until function has
670 *		completed on other CPUs.
671 * @gfp_flags:	GFP flags to use when allocating the cpumask
672 *		used internally by the function.
673 *
 674 * The function might sleep if the GFP flags indicate a non-atomic
 675 * allocation is allowed.
676 *
677 * Preemption is disabled to protect against CPUs going offline but not online.
678 * CPUs going online during the call will not be seen or sent an IPI.
679 *
680 * You must not call this function with disabled interrupts or
681 * from a hardware interrupt handler or from a bottom half handler.
682 */
683void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
684			smp_call_func_t func, void *info, bool wait,
685			gfp_t gfp_flags, const struct cpumask *mask)
686{
687	cpumask_var_t cpus;
688	int cpu, ret;
689
690	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
691
692	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
693		preempt_disable();
694		for_each_cpu(cpu, mask)
695			if (cond_func(cpu, info))
696				__cpumask_set_cpu(cpu, cpus);
697		on_each_cpu_mask(cpus, func, info, wait);
698		preempt_enable();
699		free_cpumask_var(cpus);
700	} else {
701		/*
702		 * No free cpumask, bother. No matter, we'll
703		 * just have to IPI them one by one.
704		 */
705		preempt_disable();
706		for_each_cpu(cpu, mask)
707			if (cond_func(cpu, info)) {
708				ret = smp_call_function_single(cpu, func,
709								info, wait);
710				WARN_ON_ONCE(ret);
711			}
712		preempt_enable();
713	}
714}
715EXPORT_SYMBOL(on_each_cpu_cond_mask);
716
717void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
718			smp_call_func_t func, void *info, bool wait,
719			gfp_t gfp_flags)
720{
721	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
722				cpu_online_mask);
723}
724EXPORT_SYMBOL(on_each_cpu_cond);
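
/*
 * Example (not part of kernel/smp.c): a conditional flush sketch with
 * hypothetical names. cond_func runs with preemption disabled and decides,
 * per CPU, whether sending the IPI is worthwhile at all.
 */
static DEFINE_PER_CPU(bool, ex_dirty);

static bool ex_cpu_is_dirty(int cpu, void *info)
{
	return per_cpu(ex_dirty, cpu);
}

static void ex_flush_cpu(void *info)
{
	this_cpu_write(ex_dirty, false);
}

static void example_flush_dirty_cpus(void)
{
	/* IPI only the CPUs whose ex_dirty flag is set; wait for completion */
	on_each_cpu_cond(ex_cpu_is_dirty, ex_flush_cpu, NULL, true, GFP_KERNEL);
}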
725
726static void do_nothing(void *unused)
727{
728}
729
730/**
731 * kick_all_cpus_sync - Force all cpus out of idle
732 *
 733 * Used to synchronize the update of the pm_idle function pointer. It's
734 * called after the pointer is updated and returns after the dummy
735 * callback function has been executed on all cpus. The execution of
736 * the function can only happen on the remote cpus after they have
 737 * left the idle function which had been called via the pm_idle function
738 * pointer. So it's guaranteed that nothing uses the previous pointer
739 * anymore.
740 */
741void kick_all_cpus_sync(void)
742{
743	/* Make sure the change is visible before we kick the cpus */
744	smp_mb();
745	smp_call_function(do_nothing, NULL, 1);
746}
747EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
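
/*
 * Example (not part of kernel/smp.c): the pointer-switch pattern described in
 * the comment above, using hypothetical handler names. Once
 * kick_all_cpus_sync() returns, no CPU can still be executing through the
 * stale pointer from idle, so the old handler's resources may be torn down.
 */
static void ex_old_idle_hook(void) { }
static void ex_new_idle_hook(void) { }

static void (*ex_idle_hook)(void) = ex_old_idle_hook;

static void example_switch_idle_hook(void)
{
	WRITE_ONCE(ex_idle_hook, ex_new_idle_hook);	/* publish new hook */
	kick_all_cpus_sync();	/* wait until every CPU ran the dummy IPI */
	/* ex_old_idle_hook() is no longer reachable via ex_idle_hook */
}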
748
749/**
750 * wake_up_all_idle_cpus - break all cpus out of idle
 751 * wake_up_all_idle_cpus tries to wake up all cpus that are in an idle
 752 * state, including cpus that are idle-polling; cpus that are not idle
 753 * are left alone.
754 */
755void wake_up_all_idle_cpus(void)
756{
757	int cpu;
758
759	preempt_disable();
760	for_each_online_cpu(cpu) {
761		if (cpu == smp_processor_id())
762			continue;
763
764		wake_up_if_idle(cpu);
765	}
766	preempt_enable();
767}
768EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
769
770/**
771 * smp_call_on_cpu - Call a function on a specific cpu
772 *
773 * Used to call a function on a specific cpu and wait for it to return.
774 * Optionally make sure the call is done on a specified physical cpu via vcpu
775 * pinning in order to support virtualized environments.
776 */
777struct smp_call_on_cpu_struct {
778	struct work_struct	work;
779	struct completion	done;
780	int			(*func)(void *);
781	void			*data;
782	int			ret;
783	int			cpu;
784};
785
786static void smp_call_on_cpu_callback(struct work_struct *work)
787{
788	struct smp_call_on_cpu_struct *sscs;
789
790	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
791	if (sscs->cpu >= 0)
792		hypervisor_pin_vcpu(sscs->cpu);
793	sscs->ret = sscs->func(sscs->data);
794	if (sscs->cpu >= 0)
795		hypervisor_pin_vcpu(-1);
796
797	complete(&sscs->done);
798}
799
800int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
801{
802	struct smp_call_on_cpu_struct sscs = {
803		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
804		.func = func,
805		.data = par,
806		.cpu  = phys ? cpu : -1,
807	};
808
809	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
810
811	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
812		return -ENXIO;
813
814	queue_work_on(cpu, system_wq, &sscs.work);
815	wait_for_completion(&sscs.done);
816
817	return sscs.ret;
818}
819EXPORT_SYMBOL_GPL(smp_call_on_cpu);
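
/*
 * Example (not part of kernel/smp.c): unlike the IPI-based helpers above, the
 * callback here runs from a workqueue on the target CPU and may sleep. The
 * firmware-query callback below is hypothetical.
 */
static int ex_read_freq(void *arg)
{
	unsigned long *khz = arg;

	/* may sleep, e.g. talk to firmware from the target CPU */
	*khz = 0;
	return 0;
}

static int example_freq_on_cpu(unsigned int cpu, unsigned long *khz)
{
	/* phys=false: no hypervisor vCPU pinning requested */
	return smp_call_on_cpu(cpu, ex_read_freq, khz, false);
}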