// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

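/*
 * Native (bare-metal) unlock: a release store to the lock word.  The
 * PV_CALLEE_SAVE_REGS_THUNK below wraps it in a thunk that preserves all
 * registers, so paravirt-patched call sites do not need to follow the
 * normal C calling convention.
 */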
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

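/*
 * True if the unlock op still points at the native thunk above, i.e. no
 * hypervisor has overridden it; the paravirt patching code uses this to
 * decide whether it can inline the short native unlock sequence.
 */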
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

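/* On bare metal a "vCPU" is a physical CPU and is never preempted. */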
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

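/* As above: true if vcpu_is_preempted has not been overridden. */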
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

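/* Default ops: the native qspinlock, with wait/kick as no-ops. */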
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
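
/*
 * Illustrative sketch (not part of this file): a paravirt guest replaces
 * these defaults at boot.  This is roughly what KVM's kvm_spinlock_init()
 * in arch/x86/kernel/kvm.c did in this era; details are simplified here,
 * consult the actual source.
 */
#if 0	/* example only */
void __init kvm_spinlock_init(void)
{
	/* Keep the native ops unless the host advertises PV unhalt. */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;		/* halt the vCPU until kicked */
	pv_lock_ops.kick = kvm_kick_cpu;	/* wake a waiting vCPU */
}
#endif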
/*
 * For comparison, the pre-qspinlock (ticket-lock era) revision of the
 * same file follows.
 *
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/paravirt.h>

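/*
 * The native ticket lock cannot make use of the saved IRQ flags, so the
 * default op simply ignores @flags and takes the lock.
 */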
static inline void
default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	arch_spin_lock(lock);
}

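/* Default ops: the native ticket-lock implementation. */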
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.spin_is_locked = __ticket_spin_is_locked,
	.spin_is_contended = __ticket_spin_is_contended,

	.spin_lock = __ticket_spin_lock,
	.spin_lock_flags = default_spin_lock_flags,
	.spin_trylock = __ticket_spin_trylock,
	.spin_unlock = __ticket_spin_unlock,
#endif
};
EXPORT_SYMBOL(pv_lock_ops);