/*
 * lib/lockref.c — spinlock-protected reference count with a lockless
 * cmpxchg fast path.
 *
 * Two snapshots of this file follow: Linux v3.15, then Linux v5.14.15.
 * (Web-page navigation chrome removed from this extraction.)
 */
  1#include <linux/export.h>
  2#include <linux/lockref.h>
  3#include <linux/mutex.h>
  4
  5#if USE_CMPXCHG_LOCKREF
  6
  7/*
  8 * Allow weakly-ordered memory architectures to provide barrier-less
  9 * cmpxchg semantics for lockref updates.
 10 */
 11#ifndef cmpxchg64_relaxed
 12# define cmpxchg64_relaxed cmpxchg64
 13#endif
 14
 15/*
 16 * Note that the "cmpxchg()" reloads the "old" value for the
 17 * failure case.
 18 */
 19#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
 
 20	struct lockref old;							\
 21	BUILD_BUG_ON(sizeof(old) != 8);						\
 22	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
 23	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
 24		struct lockref new = old, prev = old;				\
 25		CODE								\
 26		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
 27						   old.lock_count,		\
 28						   new.lock_count);		\
 29		if (likely(old.lock_count == prev.lock_count)) {		\
 30			SUCCESS;						\
 31		}								\
 32		arch_mutex_cpu_relax();						\
 
 
 33	}									\
 34} while (0)
 35
 36#else
 37
 38#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
 39
 40#endif
 41
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment while the spinlock is free. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held (or no cmpxchg support) — take it. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
 62
 63/**
 64 * lockref_get_not_zero - Increments count unless the count is 0
 65 * @lockref: pointer to lockref structure
 66 * Return: 1 if count updated successfully or 0 if count was zero
 67 */
 68int lockref_get_not_zero(struct lockref *lockref)
 69{
 70	int retval;
 71
 72	CMPXCHG_LOOP(
 73		new.count++;
 74		if (!old.count)
 75			return 0;
 76	,
 77		return 1;
 78	);
 79
 80	spin_lock(&lockref->lock);
 81	retval = 0;
 82	if (lockref->count) {
 83		lockref->count++;
 84		retval = 1;
 85	}
 86	spin_unlock(&lockref->lock);
 87	return retval;
 88}
 89EXPORT_SYMBOL(lockref_get_not_zero);
 90
 91/**
 92 * lockref_get_or_lock - Increments count unless the count is 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 93 * @lockref: pointer to lockref structure
 94 * Return: 1 if count updated successfully or 0 if count was zero
 95 * and we got the lock instead.
 96 */
 97int lockref_get_or_lock(struct lockref *lockref)
 98{
 99	CMPXCHG_LOOP(
100		new.count++;
101		if (!old.count)
102			break;
103	,
104		return 1;
105	);
106
107	spin_lock(&lockref->lock);
108	if (!lockref->count)
109		return 0;
110	lockref->count++;
111	spin_unlock(&lockref->lock);
112	return 1;
113}
114EXPORT_SYMBOL(lockref_get_or_lock);
115
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless decrement, but never drop the last reference. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	/*
	 * Slow path. NOTE: on a 0 return the spinlock is left HELD so the
	 * caller can dispose of the object while holding it.
	 */
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
139
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold the spinlock (asserted). A dead lockref has
 * count == -128; lockref_get_not_dead() refuses to revive it.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
150
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/*
	 * Fast path: count is unsigned in this kernel version, so cast to
	 * (int) to detect the dead marker (-128 from lockref_mark_dead()).
	 */
	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path under the spinlock; same signed dead-check. */
	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
/* ===== lib/lockref.c as of Linux v5.14.15 ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/export.h>
  3#include <linux/lockref.h>
 
  4
#if USE_CMPXCHG_LOCKREF

/*
 * Lockless update loop: while the embedded spinlock is observed free,
 * speculatively apply CODE to a copy of the lockref and try to commit
 * it with one 64-bit cmpxchg of the combined lock+count word
 * (BUILD_BUG_ON enforces the 8-byte layout). On success, SUCCESS runs.
 * The loop gives up after 100 retries so a heavily contended fast path
 * falls back to the spinlock instead of livelocking.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: every operation takes the spinlock path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
 36
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment while the spinlock is free. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: contended, retries exhausted, or no cmpxchg support. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
 57
 58/**
 59 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 60 * @lockref: pointer to lockref structure
 61 * Return: 1 if count updated successfully or 0 if count was zero
 62 */
 63int lockref_get_not_zero(struct lockref *lockref)
 64{
 65	int retval;
 66
 67	CMPXCHG_LOOP(
 68		new.count++;
 69		if (old.count <= 0)
 70			return 0;
 71	,
 72		return 1;
 73	);
 74
 75	spin_lock(&lockref->lock);
 76	retval = 0;
 77	if (lockref->count > 0) {
 78		lockref->count++;
 79		retval = 1;
 80	}
 81	spin_unlock(&lockref->lock);
 82	return retval;
 83}
 84EXPORT_SYMBOL(lockref_get_not_zero);
 85
 86/**
 87 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 88 * @lockref: pointer to lockref structure
 89 * Return: 1 if count updated successfully or 0 if count would become zero
 90 */
 91int lockref_put_not_zero(struct lockref *lockref)
 92{
 93	int retval;
 94
 95	CMPXCHG_LOOP(
 96		new.count--;
 97		if (old.count <= 1)
 98			return 0;
 99	,
100		return 1;
101	);
102
103	spin_lock(&lockref->lock);
104	retval = 0;
105	if (lockref->count > 1) {
106		lockref->count--;
107		retval = 1;
108	}
109	spin_unlock(&lockref->lock);
110	return retval;
111}
112EXPORT_SYMBOL(lockref_put_not_zero);
113
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless increment of a live (positive) count. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	/*
	 * Slow path. NOTE: on a 0 return the spinlock is left HELD for
	 * the caller, per the documented contract above.
	 */
	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
138
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 *
 * This is purely the lockless fast path: if the cmpxchg loop cannot
 * commit (lock held, retries exhausted, or !USE_CMPXCHG_LOCKREF where
 * CMPXCHG_LOOP expands to nothing), it returns -1 without ever taking
 * the spinlock — the caller must handle the error path itself.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
158
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless decrement, but never drop the last reference. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	/*
	 * Slow path. NOTE: on a 0 return the spinlock is left HELD so the
	 * caller can dispose of the object while holding it.
	 */
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
182
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold the spinlock (asserted). A dead lockref has
 * count == -128; lockref_get_not_dead() refuses to revive it.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
193
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/*
	 * Fast path: a negative count is the dead marker (-128, set by
	 * lockref_mark_dead()); count is signed in this kernel version.
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path under the spinlock; same dead-check. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);