v3.15 (lib/lockref.c):
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = ACCESS_ONCE(lockref->lock_count);                      \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                arch_mutex_cpu_relax();                                         \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
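The BUILD_BUG_ON(sizeof(old) != 8) above is what makes this scheme work: struct lockref overlays the spinlock and the reference count in a single 64-bit word, so one cmpxchg64 can update the count while simultaneously confirming that the lock word has not changed from its unlocked snapshot (checked with arch_spin_value_unlocked()). Below is a minimal standalone sketch of that packing idea; it is my own illustration, not the real <linux/lockref.h> definition, whose field types, widths and config guards differ.

#include <stdint.h>
#include <stdio.h>

/* Model only: a 32-bit "lock" word and a 32-bit count share one 64-bit
 * value, so one 64-bit compare-and-swap can check the lock state and
 * update the count as a single atomic unit. */
struct model_lockref {
        union {
                uint64_t lock_count;    /* what cmpxchg64 would act on */
                struct {
                        uint32_t lock;  /* 0 means "unlocked" in this model */
                        int32_t count;
                };
        };
};

/* The moral equivalent of BUILD_BUG_ON(sizeof(old) != 8). */
_Static_assert(sizeof(struct model_lockref) == 8,
               "lock and count must fit in a single cmpxchg64 word");

int main(void)
{
        struct model_lockref ref = { 0 };

        ref.count = 1;
        ref.count++;    /* the kernel does this via cmpxchg64 instead */
        printf("count=%d packed=0x%016llx\n",
               ref.count, (unsigned long long)ref.lock_count);
        return 0;
}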

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
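Putting the v3.15 API together: a typical user embeds a struct lockref in a cache-managed, refcounted object and pairs lockref_get_not_dead() with lockref_put_or_lock(); the in-tree user this was written for is the dcache, where struct dentry carries a d_lockref. The sketch below is hypothetical kernel-style code (cached_obj, cached_obj_get() and cached_obj_put() are invented for illustration), not something from the tree.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cached_obj {
        struct lockref ref;     /* ref.lock also serializes teardown */
        /* ... payload ... */
};

/* Grab a reference unless the object is already being torn down. */
static bool cached_obj_get(struct cached_obj *obj)
{
        return lockref_get_not_dead(&obj->ref);
}

/* Drop a reference; free the object when the last one goes away. */
static void cached_obj_put(struct cached_obj *obj)
{
        if (lockref_put_or_lock(&obj->ref))
                return;         /* fast path: count was > 1, now decremented */

        /*
         * Count was <= 1 and lockref_put_or_lock() took ref.lock for us.
         * Mark the lockref dead so concurrent cached_obj_get() callers
         * fail, then free the object.
         */
        lockref_mark_dead(&obj->ref);
        spin_unlock(&obj->ref.lock);
        kfree(obj);
}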
v6.2 (lib/lockref.c):
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        int retry = 100;                                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = READ_ONCE(lockref->lock_count);                        \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old;                                       \
                CODE                                                            \
                if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,          \
                                                 &old.lock_count,               \
                                                 new.lock_count))) {            \
                        SUCCESS;                                                \
                }                                                               \
                if (!--retry)                                                   \
                        break;                                                  \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
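Compared with the v3.15 macro above, this loop differs in two ways: try_cmpxchg64_relaxed() writes the value it actually found back into old on failure, so there is no separate prev copy or explicit reload, and the retry counter bounds how long a CPU keeps retrying before it gives up and falls back to the spinlock. Below is a rough userspace model of that loop shape using the GCC/Clang __atomic builtins; the 32/32 split between lock word and count is an assumption of the model, not the real struct lockref layout.

#include <stdbool.h>
#include <stdint.h>

static inline bool word_unlocked(uint64_t v)
{
        return (uint32_t)v == 0;        /* low 32 bits model the lock word */
}

static inline uint64_t word_inc_count(uint64_t v)
{
        return v + (1ULL << 32);        /* high 32 bits model the count */
}

/* Try a lockless increment; return false if the caller should fall
 * back to taking the real lock. */
static bool model_lockref_get(uint64_t *lock_count)
{
        uint64_t old = __atomic_load_n(lock_count, __ATOMIC_RELAXED);
        int retry = 100;

        while (word_unlocked(old)) {
                /* Like try_cmpxchg64_relaxed(): on failure, "old" is
                 * refreshed with the value currently in memory, so the
                 * next iteration re-checks the lock word and count
                 * without an explicit reload. */
                if (__atomic_compare_exchange_n(lock_count, &old,
                                                word_inc_count(old),
                                                false, __ATOMIC_RELAXED,
                                                __ATOMIC_RELAXED))
                        return true;    /* lockless increment succeeded */
                if (!--retry)
                        break;          /* bounded, unlike the v3.15 loop */
        }
        return false;                   /* fall back to the spinlock */
}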

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (old.count <= 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count > 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count > 1) {
                lockref->count--;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 0)
                        return -1;
        ,
                return new.count;
        );
        return -1;
}
EXPORT_SYMBOL(lockref_put_return);
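Note that lockref_put_return() is the one helper here without a built-in spinlock fallback: a return of -1 means the decrement did not happen at all, whether because the count was already zero, the lockref was dead or locked, too many cmpxchg attempts failed, or USE_CMPXCHG_LOCKREF is not available, so the caller must redo the operation under the lock itself. The sketch below is a hedged illustration of that calling convention; example_put() and its slow-path policy are hypothetical, loosely modeled on how fs/dcache.c consumes the return value.

/* Hypothetical caller: returns true when the last reference was dropped.
 * Only a lockref_put_return() value >= 0 means the count was actually
 * decremented locklessly. */
static bool example_put(struct lockref *ref)
{
        int count = lockref_put_return(ref);

        if (count > 0)
                return false;   /* dropped one ref, others remain */
        if (count == 0)
                return true;    /* we dropped the last reference */

        /*
         * -1: nothing was decremented (lock held, lockref dead, too much
         * contention, or no cmpxchg support), so redo it under the lock.
         * A real caller would also decide here how to handle a dead
         * lockref; this sketch simply skips the decrement in that case.
         */
        spin_lock(&ref->lock);
        if (ref->count > 0)
                ref->count--;
        count = ref->count;
        spin_unlock(&ref->lock);
        return count == 0;
}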

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);