// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

/*
 * On UP the only valid target is CPU 0, so just run the function locally
 * with interrupts disabled.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

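/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants @func run on CPU 0 and waits for it to finish.  The example_*
 * names below are made up for the sketch.
 */
static void example_store_value(void *info)
{
	/* Runs with interrupts disabled on the UP build. */
	*(int *)info = 42;
}

static void example_single_call(void)
{
	int val = 0;

	/* Only CPU 0 exists here; any other cpu argument returns -ENXIO. */
	if (smp_call_function_single(0, example_store_value, &val, 1))
		pr_warn("example: smp_call_function_single failed\n");
}
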
/*
 * On UP there is nowhere to queue the request: run csd->func() right here,
 * synchronously, with interrupts disabled.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

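/*
 * Usage sketch (illustrative only): the async variant takes a caller-owned
 * call_single_data_t whose ->func and ->info fields are filled in first.
 * On UP the callback still runs synchronously, as seen above.  The
 * example_* names are made up, and the exact way a csd is initialised can
 * vary between kernel versions.
 */
static void example_async_cb(void *info)
{
	pr_info("example: async callback ran with info=%p\n", info);
}

static call_single_data_t example_csd = {
	.func = example_async_cb,
	.info = NULL,
};

static void example_async_call(void)
{
	smp_call_function_single_async(0, &example_csd);
}
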
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

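/*
 * Usage sketch (illustrative only): run @func on every CPU in @mask for
 * which @cond_func returns true.  On UP that boils down to the single test
 * against CPU 0 above.  The example_* names are made up for the sketch.
 */
static bool example_needs_flush(int cpu, void *info)
{
	/* Pretend the caller's state says whether this CPU needs work. */
	return *(bool *)info;
}

static void example_do_flush(void *info)
{
	pr_info("example: flushing on this CPU\n");
}

static void example_cond_call(void)
{
	bool needs_flush = true;

	on_each_cpu_cond_mask(example_needs_flush, example_do_flush,
			      &needs_flush, true, cpu_online_mask);
}
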
/*
 * When @phys is set, pin the current vCPU to the physical CPU for the
 * duration of the call, and unpin it again afterwards.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
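/*
 * Usage sketch (illustrative only): smp_call_on_cpu() runs a function that
 * returns a value; with @phys set the caller additionally asks to stay on
 * the physical CPU (via hypervisor_pin_vcpu() above) while it runs.  The
 * example_* names are made up for the sketch.
 */
static int example_read_hw_state(void *par)
{
	*(int *)par = 1;	/* stand-in for a real hardware query */
	return 0;
}

static void example_call_on_cpu(void)
{
	int state = 0;
	int err;

	err = smp_call_on_cpu(0, example_read_hw_state, &state, true);
	if (err)
		pr_warn("example: smp_call_on_cpu failed: %d\n", err);
}
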
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(on_each_cpu);

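/*
 * Usage sketch (illustrative only): on_each_cpu() runs @func on every
 * online CPU, which on UP is just the local one.  The example_* names are
 * made up for the sketch.
 */
static void example_bump_counter(void *info)
{
	(*(unsigned long *)info)++;
}

static void example_on_each_cpu(void)
{
	unsigned long count = 0;

	on_each_cpu(example_bump_counter, &count, 1);
}
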
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);

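/*
 * Usage sketch (illustrative only): the mask test above matters because a
 * caller may legitimately pass a mask that does not contain CPU 0, in
 * which case @func must not run.  The example_* names are made up.
 */
static void example_mask_work(void *info)
{
	pr_info("example: running for a CPU in the mask\n");
}

static void example_mask_call(const struct cpumask *mask)
{
	/* func runs only if CPU 0 is in @mask; an empty mask is a no-op. */
	on_each_cpu_mask(mask, example_mask_work, NULL, true);
}
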
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);
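/*
 * Usage sketch (illustrative only): this older on_each_cpu_cond() interface
 * also takes gfp_flags, which the SMP implementation may use to allocate a
 * temporary cpumask; on UP the flags are simply unused.  The example_*
 * names are made up for the sketch.
 */
static bool example_cpu_is_dirty(int cpu, void *info)
{
	return true;	/* pretend every CPU has work pending */
}

static void example_writeback(void *info)
{
	pr_info("example: writeback on this CPU\n");
}

static void example_cond_writeback(void)
{
	on_each_cpu_cond(example_cpu_is_dirty, example_writeback,
			 NULL, true, GFP_KERNEL);
}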