/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

struct extent_buffer;
struct btrfs_path;
struct btrfs_root;

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in the number of subclasses by MAX_LOCKDEP_SUBCLASSES,
 * which at the time of this writing is 8, and that is how many we use.
 * Keep this in mind if you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block. Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to COW that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks. Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle the case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one. Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES subclasses, so add this in
	 * here and add a static_assert to keep us from going over the limit.
	 * As of this writing we're limited to 8, and we're definitely using 8,
	 * hence this check to keep us from messing up in the future.
	 */
	BTRFS_NESTING_MAX,
};

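/*
 * Illustrative use of the subclasses (a sketch, not a verbatim excerpt
 * from ctree.c): a caller that already holds the lock on the node it
 * searched to and now needs its left sibling picks the dedicated
 * subclass so lockdep can tell the two same-level locks apart:
 *
 *	btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
 */
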
enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};

/*
 * Lockdep annotation for wait events.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock:  The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock) \
	do { \
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_); \
		rwsem_release(&owner->lock##_map, _THIS_IP_); \
	} while (0)

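/*
 * Example (a sketch, assuming an fs_info that set the map up with
 * btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent); "waitq" and
 * "condition" are placeholders for the caller's wait queue and wake-up
 * condition):
 *
 *	btrfs_might_wait_for_event(fs_info, btrfs_ordered_extent);
 *	wait_event(waitq, condition);
 */
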
/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock:  The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock) \
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock) \
	rwsem_release(&owner->lock##_map, _THIS_IP_)

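/*
 * Reader-side sketch matching the waiter above (update_condition() is a
 * hypothetical helper standing in for whatever modifies the condition):
 *
 *	btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
 *	update_condition();
 *	wake_up(&waitq);
 *	btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 */
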
/*
 * Used to account for the fact that when doing io_uring encoded I/O, we can
 * return to userspace with the inode lock still held.
 */
#define btrfs_lockdep_inode_acquire(owner, lock) \
	rwsem_acquire_read(&owner->vfs_inode.lock.dep_map, 0, 0, _THIS_IP_)

#define btrfs_lockdep_inode_release(owner, lock) \
	rwsem_release(&owner->vfs_inode.lock.dep_map, _THIS_IP_)

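/*
 * Sketch of the intended pairing (hypothetical caller; "inode" is a
 * struct btrfs_inode *): tell lockdep the i_rwsem is released before
 * returning to userspace even though it is still held, and re-acquire
 * the annotation when the io_uring request resumes:
 *
 *	btrfs_lockdep_inode_release(inode, i_rwsem);
 *	(return to userspace with the lock held)
 *	btrfs_lockdep_inode_acquire(inode, i_rwsem);
 */
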
/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros.
 */
#define btrfs_might_wait_for_state(owner, i) \
	do { \
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \
	} while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i) \
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i) \
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)

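/*
 * Example (sketch): a thread about to wait for the running transaction
 * to leave the blocked state would annotate it as:
 *
 *	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
 */
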
/* Initialization of the lockdep map. */
#define btrfs_lockdep_init_map(owner, lock) \
	do { \
		static struct lock_class_key lock##_key; \
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state) \
	do { \
		static struct lock_class_key lock##_key; \
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
				 &lock##_key, 0); \
	} while (0)

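/*
 * Typical setup (a sketch modeled on how disk-io.c initializes these
 * maps during mount; the exact map names here are assumptions):
 *
 *	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
 *	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
 *				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
 */
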
static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);

static inline void btrfs_tree_lock(struct extent_buffer *eb)
{
	btrfs_tree_lock_nested(eb, BTRFS_NESTING_NORMAL);
}

void btrfs_tree_unlock(struct extent_buffer *eb);

void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);

static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	btrfs_tree_read_lock_nested(eb, BTRFS_NESTING_NORMAL);
}

void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);

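/*
 * Illustrative use of the try-lock variant (hypothetical caller):
 * btrfs_try_tree_read_lock() returns nonzero on success, so a caller can
 * opportunistically try and then fall back to blocking:
 *
 *	if (!btrfs_try_tree_read_lock(eb))
 *		btrfs_tree_read_lock(eb);
 */
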
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}

/*
 * A "drew" lock is a double reader-writer exclusion lock: any number of
 * readers may hold it at the same time, or any number of writers, but
 * readers and writers exclude each other (A-B exclusion, without A-A or
 * B-B exclusion).
 */
struct btrfs_drew_lock {
	atomic_t readers;
	atomic_t writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

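/*
 * Usage sketch (assuming a drew lock embedded in a containing struct, as
 * btrfs does with root->snapshot_lock to exclude snapshot creation from
 * concurrent nocow writers): members of the same class run concurrently,
 * while the two classes exclude each other.
 *
 *	btrfs_drew_read_lock(&root->snapshot_lock);
 *	(do the nocow write)
 *	btrfs_drew_read_unlock(&root->snapshot_lock);
 */
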
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
						  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
						   struct extent_buffer *eb)
{
}
#endif

#endif