lib/lockref.c in v3.15:
 
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
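
/*
 * Illustration, not part of the original lockref.c: the CMPXCHG_LOOP()
 * invocation in lockref_get() below expands to roughly the following.
 * CODE is spliced in as the speculative update and SUCCESS as the action
 * taken when the 64-bit cmpxchg actually hit; if the snapshot ever shows
 * the spinlock held, the loop exits and the caller falls back to
 * spin_lock().
 *
 *	struct lockref old;
 *	old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		struct lockref new = old, prev = old;
 *		new.count++;				<- CODE
 *		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
 *						   old.lock_count,
 *						   new.lock_count);
 *		if (old.lock_count == prev.lock_count)
 *			return;				<- SUCCESS: count bumped locklessly
 *		arch_mutex_cpu_relax();			   cmpxchg missed: reload and retry
 *	}
 */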

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
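
Both snapshots implement the same pattern: struct lockref packs a spinlock and a reference count into one 64-bit word, CMPXCHG_LOOP() updates the count only while the lock is observed unlocked, and every operation falls back to taking lockref->lock when the fast path cannot decide. The sketch below illustrates one way a caller might use this API; the names cache_entry, cache_entry_init, cache_entry_tryget and cache_entry_put are invented for the example and do not come from the kernel.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cache_entry {
	struct lockref ref;	/* ref.lock protects the entry, ref.count is the refcount */
	/* ... payload ... */
};

static void cache_entry_init(struct cache_entry *e)
{
	spin_lock_init(&e->ref.lock);
	e->ref.count = 1;			/* creator's reference */
}

/* Lookup path: fails once the entry has been marked dead. */
static int cache_entry_tryget(struct cache_entry *e)
{
	return lockref_get_not_dead(&e->ref);
}

/* Drop a reference; the last holder marks the entry dead and frees it. */
static void cache_entry_put(struct cache_entry *e)
{
	if (lockref_put_or_lock(&e->ref))
		return;				/* fast path: other references remain */

	/* Slow path: lockref_put_or_lock() returned 0 with e->ref.lock held. */
	lockref_mark_dead(&e->ref);		/* concurrent tryget now fails */
	spin_unlock(&e->ref.lock);
	kfree(e);
}

Freeing immediately after the unlock is only safe if no other CPU can still be running the lockless cmpxchg against this object; the dcache, the main user of lockref, keeps dentries around via RCU for exactly that reason.
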
lib/lockref.c in v4.17:
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
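
/*
 * Illustration, not part of the original lockref.c: lockref_put_return()
 * never falls back to the spinlock itself.  It either does the decrement on
 * the cmpxchg fast path and returns the new count, or it returns -1 (lockref
 * locked or dead, count already <= 0, or CMPXCHG_LOOP compiled out) and
 * leaves the count unchanged.  A hypothetical caller therefore looks like:
 *
 *	int count = lockref_put_return(&obj->ref);
 *
 *	if (count == 0)
 *		obj_release(obj);		last reference dropped
 *	else if (count < 0)
 *		obj_put_slowpath(obj);		retry under obj->ref.lock
 *
 * where obj, obj_release() and obj_put_slowpath() are invented names for the
 * example.
 */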

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
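
Both versions depend on the layout of struct lockref: the spinlock and the count share a single 64-bit word, which is why CMPXCHG_LOOP() can check arch_spin_value_unlocked() on a snapshot and then publish the new count with one cmpxchg64_relaxed(), and why it asserts BUILD_BUG_ON(sizeof(old) != 8). The definition below is a simplified paraphrase of include/linux/lockref.h, shown only for orientation; the real header additionally restricts USE_CMPXCHG_LOCKREF to SMP configurations whose architecture spinlock fits in four bytes.

/* Simplified paraphrase of include/linux/lockref.h, not a verbatim copy. */
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;		/* the spinlock and the count as one 64-bit value */
#endif
		struct {
			spinlock_t lock;	/* fallback lock for the slow paths */
			int count;		/* the reference count; -128 means dead */
		};
	};
};

A successful cmpxchg therefore proves two things at once: the count still has the value this CPU based its update on, and the lock word still looks unlocked, so nobody can have been modifying the count under the spinlock at that moment.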