include/asm-generic/qspinlock.h
v5.9
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid lock stealing by the lockref
 *      code and changing things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
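
Note the #ifndef hooks above: an architecture can define queued_spin_is_locked, queued_spin_lock, or queued_spin_unlock in its own headers before this file is included, and the corresponding generic definition drops out. A minimal sketch of that wiring follows, assuming a hypothetical arch/foo; the define-the-name-to-itself idiom is the real mechanism (x86 uses it to hook in its paravirt-aware unlock), while the file name and the function body here are illustrative only.

/* Hypothetical arch/foo/include/asm/qspinlock.h -- illustrative sketch */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/*
 * Defining the name to itself makes the #ifndef in the generic header
 * evaluate false, so the generic definition is skipped.
 */
#define queued_spin_unlock queued_spin_unlock
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * A real override would place an arch-optimized or hooked
	 * release here; the generic release store of the locked byte
	 * stands in for it.
	 */
	smp_store_release(&lock->locked, 0);
}

/* Pick up every generic definition that was not overridden. */
#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */

The virt_spin_lock() hook works the same way: a virtualization-aware architecture can override it to acquire the lock itself (x86 spins on a simple test-and-set) and return true, making the slowpath bypass MCS queueing, which can behave badly when virtual CPUs get preempted while queued.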
v5.4 (identical to v5.9 above, except that the #ifndef override hooks around queued_spin_is_locked and queued_spin_lock, and the explicit #include <linux/atomic.h>, are not yet present)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid lock stealing by the lockref
 *      code and changing things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
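
To get a feel for what the fastpaths above do, here is a small userspace model using C11 atomics. This is not kernel code: struct qspinlock, _Q_LOCKED_VAL and the memory orderings are stand-ins mirroring queued_spin_trylock(), queued_spin_lock() and queued_spin_unlock(), under the simplifying assumption that the pending/tail bits are never set, so the kernel's MCS-queue slowpath is replaced by a plain spin and unlock may clear the whole word instead of just the locked byte.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the value in asm-generic/qspinlock_types.h. */
#define _Q_LOCKED_VAL	1U

/* The lock is a single 32-bit atomic word, as in the kernel. */
struct qspinlock {
	atomic_uint val;
};

/*
 * Mirror of queued_spin_trylock(): attempt the acquire-cmpxchg only
 * when the word reads as fully unlocked (no owner, and none of the
 * pending/tail state that the kernel keeps in the upper bits).
 */
static bool model_trylock(struct qspinlock *lock)
{
	unsigned int val = atomic_load_explicit(&lock->val,
						memory_order_relaxed);

	if (val)
		return false;

	return atomic_compare_exchange_strong_explicit(&lock->val, &val,
						       _Q_LOCKED_VAL,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/*
 * Mirror of queued_spin_lock(): one fastpath cmpxchg from 0; where the
 * kernel would call queued_spin_lock_slowpath() and queue on per-CPU
 * MCS nodes, this model simply retries.
 */
static void model_lock(struct qspinlock *lock)
{
	while (!model_trylock(lock))
		;
}

/*
 * Mirror of queued_spin_unlock(): a release store of zero. The kernel
 * stores only to the locked byte so that bits set by waiters survive;
 * with no waiters modeled, clearing the whole word is equivalent.
 */
static void model_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->val, 0U, memory_order_release);
}

int main(void)
{
	struct qspinlock lock = { .val = 0 };

	model_lock(&lock);
	printf("trylock while held: %d (expect 0)\n", model_trylock(&lock));
	model_unlock(&lock);
	printf("trylock after unlock: %d (expect 1)\n", model_trylock(&lock));
	return 0;
}

Built with any C11 compiler (cc -std=c11 model.c), the two probes print 0 and 1 respectively: a second acquisition attempt fails while the lock word is non-zero and succeeds again once the release store has cleared it.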