/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/paravirt.h>

/* The native ticket lock has no use for the flags, so just take the lock. */
static inline void
default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        arch_spin_lock(lock);
}

/*
 * Defaults: plain native ticket-lock operations, until a hypervisor
 * backend overrides them at init time.
 */
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
        .spin_is_locked = __ticket_spin_is_locked,
        .spin_is_contended = __ticket_spin_is_contended,

        .spin_lock = __ticket_spin_lock,
        .spin_lock_flags = default_spin_lock_flags,
        .spin_trylock = __ticket_spin_trylock,
        .spin_unlock = __ticket_spin_unlock,
#endif
};
EXPORT_SYMBOL(pv_lock_ops);

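/*
 * Illustrative sketch, not part of the file above: a hypervisor guest
 * backend replaces these ops at early init so a contended vCPU blocks
 * instead of burning cycles spinning.  Every example_* name below is a
 * hypothetical stand-in for a real backend's code.
 */
static void example_spin_lock(arch_spinlock_t *lock)
{
        while (!__ticket_spin_trylock(lock))
                example_block_until_kicked(lock);       /* hypothetical hypercall */
}

static void example_spin_unlock(arch_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
        example_kick_waiters(lock);                     /* hypothetical hypercall */
}

static void __init example_init_spinlocks(void)
{
        pv_lock_ops.spin_lock = example_spin_lock;
        pv_lock_ops.spin_unlock = example_spin_unlock;
}
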
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

#ifdef CONFIG_QUEUED_SPINLOCKS
/*
 * Out-of-line wrapper for the native unlock, so a callee-save thunk
 * can be generated for it below.
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}

PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

/*
 * True while the unlock op still points at the native thunk, i.e. no
 * hypervisor backend has overridden it.
 */
bool pv_is_native_spin_unlock(void)
{
        return pv_lock_ops.queued_spin_unlock.func ==
                __raw_callee_save___native_queued_spin_unlock;
}
#endif
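
/*
 * Why the thunk and the check above matter: PV_CALLEE_SAVE_REGS_THUNK()
 * emits a wrapper that preserves all registers, so call sites can use a
 * bare CALL without the usual clobbers.  pv_is_native_spin_unlock()
 * lets the paravirt patcher see that no hypervisor override is
 * installed and rewrite the call site with the short native unlock
 * sequence (a single byte store to the lock word) instead of an
 * indirect call.
 */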

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
#ifdef CONFIG_QUEUED_SPINLOCKS
        .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
#else /* !CONFIG_QUEUED_SPINLOCKS */
        .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
        .unlock_kick = paravirt_nop,
#endif /* !CONFIG_QUEUED_SPINLOCKS */
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);

/*
 * Flipped by a hypervisor guest at boot to enable the paravirt
 * ticket-lock slow path.
 */
struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
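
/*
 * Illustrative sketch, not part of the file above: with
 * CONFIG_QUEUED_SPINLOCKS, a hypervisor guest overrides the slow path
 * and the unlock op, supplies wait/kick hooks, and flips the static
 * key (which only the ticket-lock build actually consumes).  The
 * example_* names are hypothetical; the overall shape follows what
 * KVM-style guests did in this era.
 */
static void example_wait(u8 *ptr, u8 val)
{
        /* hypothetical: halt this vCPU until the lock holder kicks it */
}

static void example_kick(int cpu)
{
        /* hypothetical: wake the vCPU waiting on the lock */
}

static __init void example_init_pv_spinlocks(void)
{
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = example_wait;
        pv_lock_ops.kick = example_kick;

        /* Real guests flipped this from a separate early initcall. */
        static_key_slow_inc(&paravirt_ticketlocks_enabled);
}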