lib/lockref.c

v3.15
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
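
To make the macro's control flow easier to follow, here is roughly what the
CMPXCHG_LOOP(new.count++;, return;) invocation in lockref_get() below expands to
when USE_CMPXCHG_LOCKREF is enabled (a hand-expanded sketch for illustration
only; the BUILD_BUG_ON is omitted):

	struct lockref old;
	old.lock_count = ACCESS_ONCE(lockref->lock_count);
	/* Spin only while the embedded spinlock is observed to be unlocked. */
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
		struct lockref new = old, prev = old;
		new.count++;					/* CODE */
		/* On failure, cmpxchg64_relaxed() returns the current value,
		 * so "old" is reloaded and the loop condition re-checks it. */
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
						   old.lock_count,
						   new.lock_count);
		if (likely(old.lock_count == prev.lock_count))
			return;					/* SUCCESS */
		arch_mutex_cpu_relax();
	}
	/* The loop exits only once the lock is seen held; the caller then
	 * falls through to its spinlocked slow path. */

When USE_CMPXCHG_LOCKREF is not set, CMPXCHG_LOOP() is empty, so every helper in
this file simply falls through to taking the spinlock.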

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
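
The usage sketches in this listing assume a hypothetical object that embeds a
struct lockref; none of the foo_*() helpers are part of lockref.c. A caller that
already holds a reference can take another one without touching the spinlock on
the common path:

	struct foo {
		struct lockref ref;	/* spinlock + reference count in one 64-bit word */
		/* ... payload ... */
	};

	/* Take an extra reference while already holding one, e.g. before
	 * handing the object to another thread. */
	static void foo_get(struct foo *f)
	{
		lockref_get(&f->ref);
	}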

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
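
A sketch of how a lookup path might use this, reusing the hypothetical struct foo
from above (foo_being_freed() is also hypothetical): a return of 0 means the count
was zero and the caller now holds lockref->lock, so it must either resurrect the
object or give up, and must drop the lock itself.

	static bool foo_try_grab(struct foo *f)
	{
		if (lockref_get_or_lock(&f->ref))
			return true;		/* count was non-zero and was bumped */

		/* Count was zero; we now hold f->ref.lock. */
		if (foo_being_freed(f)) {	/* hypothetical check */
			spin_unlock(&f->ref.lock);
			return false;
		}
		f->ref.count = 1;		/* resurrect the zero-count object */
		spin_unlock(&f->ref.lock);
		return true;
	}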

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
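
A sketch of a release path built on this (foo_destroy() is hypothetical, and the
object is assumed to be unreachable to new lookups by the time the last reference
is dropped): a return of 0 means the count was still <= 1, nothing was decremented,
and the caller now holds the lock and has to finish the final put itself.

	static void foo_put(struct foo *f)
	{
		if (lockref_put_or_lock(&f->ref))
			return;			/* count was > 1: just dropped one reference */

		/* count was <= 1, nothing was decremented, and we hold f->ref.lock */
		f->ref.count--;
		spin_unlock(&f->ref.lock);
		foo_destroy(f);			/* hypothetical teardown */
	}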

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
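
A sketch of the "dead" protocol with the hypothetical struct foo from above:
teardown marks the lockref dead while holding the lock, and lockless lookups
(running under rcu_read_lock(), so the memory cannot go away underneath them)
use lockref_get_not_dead() so they can never revive an object that is already
being destroyed.

	/* Teardown side: called with f->ref.lock held, after the count dropped to zero. */
	static void foo_kill(struct foo *f)
	{
		lockref_mark_dead(&f->ref);	/* count goes negative: no new lockless gets */
		foo_unhash(f);			/* hypothetical: remove from the lookup structure */
		spin_unlock(&f->ref.lock);
		/* ... free f after an RCU grace period ... */
	}

	/* Lookup side: f was found in an RCU-protected structure. */
	static struct foo *foo_lookup_grab(struct foo *f)
	{
		if (!lockref_get_not_dead(&f->ref))
			return NULL;		/* already dead: teardown in progress */
		return f;
	}

This is broadly the pattern the dentry cache follows, but the foo_*() helpers are
illustrative only.
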
v4.10.11
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
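
A sketch of a caller, again reusing the hypothetical struct foo (foo_destroy() and
foo_put_slow() are hypothetical): the key point is that -1 does not mean "now
zero", only that the lockless decrement could not be performed, so the caller has
to fall back to a locked slow path.

	static void foo_put(struct foo *f)
	{
		int count = lockref_put_return(&f->ref);

		if (count > 0)
			return;			/* dropped one reference, others remain */
		if (count == 0) {
			/* We dropped the last reference; f is assumed to be
			 * unreachable to new lookups by now. */
			foo_destroy(f);		/* hypothetical */
			return;
		}
		/* -1: count was <= 0 (dead), the lock was held, or there is no
		 * cmpxchg support; nothing was changed, so retry under the lock. */
		foo_put_slow(f);		/* hypothetical spinlock-based fallback */
	}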

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);