include/asm-generic/qspinlock.h
v5.4
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
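
For context, a sketch of the lock word these helpers operate on, per asm-generic/qspinlock_types.h. The constants below assume the common NR_CPUS < 16K configuration (with more CPUs the pending field shrinks to a single bit); treat them as an illustration rather than the authoritative definitions:

/* Lock word layout (sketch, NR_CPUS < 16K case): */
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_MASK		0x000000ffU	/* bits  0- 7: locked byte */
#define _Q_PENDING_OFFSET	8
#define _Q_PENDING_MASK		0x0000ff00U	/* bits  8-15: pending */
#define _Q_TAIL_OFFSET		16
#define _Q_TAIL_MASK		0xffff0000U	/* bits 16-31: MCS queue tail */
#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

Any nonzero value therefore means "locked" in some sense: the locked byte is set, a waiter is spinning on the pending bit, or the tail records queued CPUs, which is why a plain atomic_read() suffices above.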

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, so the lockref code
 *      cannot steal the lock and change things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
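
Given that layout, masking off the locked byte leaves exactly the pending and tail bits, so the check above reads as "does anyone besides the owner have state in the lock word". Worked examples under the layout sketched earlier:

/*
 * Worked examples (layout as sketched above):
 *   val = 0x00000001: held, nobody waiting  -> val & ~0xff == 0 (uncontended)
 *   val = 0x00000101: held, pending waiter  -> val & ~0xff != 0 (contended)
 *   val = 0x00050001: held, MCS queue tail  -> val & ~0xff != 0 (contended)
 */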

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
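
A minimal, hypothetical caller sketch of the trylock pattern; real kernel code would take the spin_trylock() wrapper on a spinlock_t rather than calling this helper directly:

/* Hypothetical illustration only; the function and counter are made up. */
static bool try_bump_counter(struct qspinlock *lock, unsigned long *counter)
{
	if (!queued_spin_trylock(lock))
		return false;	/* lock word was nonzero: back off, retry later */

	(*counter)++;		/* critical section */
	queued_spin_unlock(lock);
	return true;
}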

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
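
The fastpath leans on the semantics of atomic_try_cmpxchg_acquire(): on failure it writes the value it actually observed back through &val, which is exactly what the slowpath wants as its second argument. A sketch of those semantics, modeled on the kernel's generic try_cmpxchg fallback:

/* Sketch of try_cmpxchg semantics (cf. the generic atomic fallbacks). */
static __always_inline bool
try_cmpxchg_acquire_sketch(atomic_t *v, int *old, int new)
{
	int seen = atomic_cmpxchg_acquire(v, *old, new);

	if (likely(seen == *old))
		return true;	/* swapped: the lock word was 0, we own it */

	*old = seen;		/* failed: report the contended value back */
	return false;
}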

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
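
The plain byte store is legal because struct qspinlock overlays byte and halfword views on the atomic word, and the lock holder exclusively owns the locked byte. A sketch of the structure from asm-generic/qspinlock_types.h (little-endian variant; big-endian reverses the member order):

typedef struct qspinlock {
	union {
		atomic_t val;			/* the whole 32-bit lock word */
		struct {
			u8	locked;		/* bits  0- 7, owned by the holder */
			u8	pending;	/* bits  8-15 */
		};
		struct {
			u16	locked_pending;	/* bits  0-15 */
			u16	tail;		/* bits 16-31 */
		};
	};
} arch_spinlock_t;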

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
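
This stub exists so a virtualization-aware architecture can opt out of queueing: fair MCS queueing behaves badly when a queued vCPU is preempted. As a hedged illustration modeled on the x86 override (in_hypervisor_guest() is a stand-in invented for the sketch), an architecture might fall back to an unfair test-and-set loop and return true so queued_spin_lock_slowpath() bails out early:

/* Hypothetical override; in_hypervisor_guest() is a stand-in condition. */
#define virt_spin_lock virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!in_hypervisor_guest())
		return false;		/* bare metal: keep fair queueing */

	/* Unfair test-and-set: no queue for a preempted vCPU to stall. */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}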

/*
 * Remap the architecture-specific spinlock functions to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
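
To see how the pieces compose, a minimal, hypothetical use of the arch-level wrappers; in practice kernel code declares a spinlock_t and uses spin_lock()/spin_unlock(), which layer lockdep and preemption bookkeeping on top of these macros:

/* Hypothetical direct use of the arch-level API (demo names invented). */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned long demo_events;

static void demo_record_event(void)
{
	arch_spin_lock(&demo_lock);	/* fastpath: one try_cmpxchg */
	demo_events++;
	arch_spin_unlock(&demo_lock);	/* release: plain byte store */
}
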
v4.17
 
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, so the lockref code
 *      cannot steal the lock and change things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
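
Unlike the try_cmpxchg form in the v5.4 version above, this fastpath works off the cmpxchg return value: atomic_cmpxchg_acquire() returns what the lock word held before the attempt, so a nonzero result both signals failure and supplies the contended value the slowpath needs. An equivalent spelling of the same logic:

/* Equivalent spelling of the fastpath/slowpath split above (sketch). */
static __always_inline void queued_spin_lock_sketch(struct qspinlock *lock)
{
	u32 val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);

	if (unlikely(val != 0))		/* prior value nonzero: contended */
		queued_spin_lock_slowpath(lock, val);
}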

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
}
#endif
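
Here, release is a full-word atomic: the generic code at this point did not yet use the byte-addressable union view seen in the v5.4 version above, so it clears the locked byte with an atomic subtract, which preserves concurrent pending/tail updates racing on the same word. A worked example under the layout sketched earlier:

/*
 * Worked example (layout as sketched for the v5.4 version):
 *   before: val = 0x00050001	held, with queued waiters
 *   atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 *   after:  val = 0x00050000	locked byte clear, queue state intact
 */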

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap the architecture-specific spinlock functions to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */