/* (stray "Loading..." page-capture artifact removed — not part of the source) */
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3#include <linux/lockref.h>
4
#if USE_CMPXCHG_LOCKREF

/*
 * Lock-free fast path: speculatively update the whole 64-bit
 * { lock, count } word with a single cmpxchg64 while the spinlock is
 * observed to be unlocked.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * CODE runs with "old" (the current snapshot) and "new" (the value to
 * store) in scope and may "return" or "break" out of the loop early;
 * SUCCESS runs only after the cmpxchg succeeded.  Falling out of the
 * loop (lock seen held, or 100 failed attempts) leaves the lockref
 * unmodified so the caller can take the spinlock slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: every operation goes through the spinlock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
36
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lock-free increment of the packed lock+count word. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held or the cmpxchg kept failing. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
57
58/**
59 * lockref_get_not_zero - Increments count unless the count is 0 or dead
60 * @lockref: pointer to lockref structure
61 * Return: 1 if count updated successfully or 0 if count was zero
62 */
63int lockref_get_not_zero(struct lockref *lockref)
64{
65 int retval;
66
67 CMPXCHG_LOOP(
68 new.count++;
69 if (old.count <= 0)
70 return 0;
71 ,
72 return 1;
73 );
74
75 spin_lock(&lockref->lock);
76 retval = 0;
77 if (lockref->count > 0) {
78 lockref->count++;
79 retval = 1;
80 }
81 spin_unlock(&lockref->lock);
82 return retval;
83}
84EXPORT_SYMBOL(lockref_get_not_zero);
85
86/**
87 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
88 * @lockref: pointer to lockref structure
89 * Return: 1 if count updated successfully or 0 if count would become zero
90 */
91int lockref_put_not_zero(struct lockref *lockref)
92{
93 int retval;
94
95 CMPXCHG_LOOP(
96 new.count--;
97 if (old.count <= 1)
98 return 0;
99 ,
100 return 1;
101 );
102
103 spin_lock(&lockref->lock);
104 retval = 0;
105 if (lockref->count > 1) {
106 lockref->count--;
107 retval = 1;
108 }
109 spin_unlock(&lockref->lock);
110 return retval;
111}
112EXPORT_SYMBOL(lockref_put_not_zero);
113
114/**
115 * lockref_get_or_lock - Increments count unless the count is 0 or dead
116 * @lockref: pointer to lockref structure
117 * Return: 1 if count updated successfully or 0 if count was zero
118 * and we got the lock instead.
119 */
120int lockref_get_or_lock(struct lockref *lockref)
121{
122 CMPXCHG_LOOP(
123 new.count++;
124 if (old.count <= 0)
125 break;
126 ,
127 return 1;
128 );
129
130 spin_lock(&lockref->lock);
131 if (lockref->count <= 0)
132 return 0;
133 lockref->count++;
134 spin_unlock(&lockref->lock);
135 return 1;
136}
137EXPORT_SYMBOL(lockref_get_or_lock);
138
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	/* Lock-free only: a dead/zero snapshot is reported as an error. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Lock held or too contended: fail without touching the count. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
158
159/**
160 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
161 * @lockref: pointer to lockref structure
162 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
163 */
164int lockref_put_or_lock(struct lockref *lockref)
165{
166 CMPXCHG_LOOP(
167 new.count--;
168 if (old.count <= 1)
169 break;
170 ,
171 return 1;
172 );
173
174 spin_lock(&lockref->lock);
175 if (lockref->count <= 1)
176 return 0;
177 lockref->count--;
178 spin_unlock(&lockref->lock);
179 return 1;
180}
181EXPORT_SYMBOL(lockref_put_or_lock);
182
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must already hold the spinlock; the dead state is a negative
 * count, which the *_not_dead/*_not_zero helpers above refuse to touch.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;	/* clearly negative; presumably slack below 0 to absorb stray puts — TODO confirm */
}
EXPORT_SYMBOL(lockref_mark_dead);
193
194/**
195 * lockref_get_not_dead - Increments count unless the ref is dead
196 * @lockref: pointer to lockref structure
197 * Return: 1 if count updated successfully or 0 if lockref was dead
198 */
199int lockref_get_not_dead(struct lockref *lockref)
200{
201 int retval;
202
203 CMPXCHG_LOOP(
204 new.count++;
205 if (old.count < 0)
206 return 0;
207 ,
208 return 1;
209 );
210
211 spin_lock(&lockref->lock);
212 retval = 0;
213 if (lockref->count >= 0) {
214 lockref->count++;
215 retval = 1;
216 }
217 spin_unlock(&lockref->lock);
218 return retval;
219}
220EXPORT_SYMBOL(lockref_get_not_dead);
1#include <linux/export.h>
2#include <linux/lockref.h>
3
#if USE_CMPXCHG_LOCKREF

/*
 * NOTE(review): everything from here down duplicates the definitions
 * earlier in this file (an older revision appears to have been appended
 * to a newer one) — a redefined CMPXCHG_LOOP and duplicated function
 * symbols will not compile/link; confirm and drop one copy.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * Fix: cap the number of cmpxchg retries (as the newer copy of this
 * macro above does) so a heavily contended lockref bounds its lock-free
 * spinning and falls back to the spinlock slow path instead of
 * retrying indefinitely.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax_lowlatency();						\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: every operation goes through the spinlock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
32
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lock-free increment of the packed lock+count word. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held, so update under the spinlock. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
53
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/* Lock-free attempt: refuse if the snapshot count is zero or dead. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* Contended/locked: recheck and update under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
81
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Lock-free attempt; a zero/dead snapshot drops to the slow path. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	/* Count zero or dead: return 0 with the spinlock still held. */
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
106
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	/* Lock-free only: a dead/zero snapshot is reported as an error. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Lock held: fail without touching the count. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
126
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Lock-free attempt; a snapshot count <= 1 drops to the slow path. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	/* Count is <= 1: return 0 with the spinlock still held. */
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
150
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must already hold the spinlock; the dead state is a negative
 * count, which lockref_get_not_dead() above refuses to touch.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;	/* clearly negative; presumably slack below 0 to absorb stray puts — TODO confirm */
}
EXPORT_SYMBOL(lockref_mark_dead);
161
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Lock-free attempt: a negative snapshot count means dead. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Contended/locked: recheck and update under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);