v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
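
For illustration only, a minimal sketch of how a caller might use smp_call_function_single(): the callback and function names below are invented, while smp_call_function_single() and smp_processor_id() are the real kernel interfaces shown or referenced above. On this uniprocessor build the call reduces to running the callback directly on CPU 0 with interrupts disabled.

#include <linux/errno.h>
#include <linux/smp.h>

static void record_cpu_id(void *info)
{
	/* Runs on the requested CPU with interrupts disabled. */
	*(int *)info = smp_processor_id();
}

static int example_query_cpu0(void)
{
	int id = -1;

	/*
	 * wait=1: do not return until record_cpu_id() has completed on CPU 0.
	 * In the UP stub above this is simply a direct call; the SMP version
	 * may use an IPI instead.
	 */
	if (smp_call_function_single(0, record_cpu_id, &id, 1))
		return -ENXIO;

	return id;
}
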
v4.10.11
 
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
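
Similarly, a hypothetical sketch of the conditional helper present in this older version, on_each_cpu_cond(): the per-CPU flag and function names below are made up for illustration, while DEFINE_PER_CPU(), per_cpu(), this_cpu_write() and on_each_cpu_cond() itself are real kernel interfaces. On the UP build this reduces to testing the condition for CPU 0 and, if it holds, calling the function with interrupts disabled.

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(bool, example_dirty);

static bool example_is_dirty(int cpu, void *info)
{
	/* cond_func: decides, per CPU, whether func should run there. */
	return per_cpu(example_dirty, cpu);
}

static void example_flush(void *info)
{
	/* Runs with interrupts disabled on each selected CPU. */
	this_cpu_write(example_dirty, false);
}

static void example_flush_all_dirty(void)
{
	/*
	 * In the UP build above this collapses to: if example_is_dirty(0),
	 * call example_flush() with interrupts off.  gfp_flags is only used
	 * by the SMP implementation, which may allocate a temporary cpumask.
	 */
	on_each_cpu_cond(example_is_dirty, example_flush, NULL, true, GFP_KERNEL);
}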