// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap. Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_range	*bmr;

	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}
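
/*
 * Illustrative sketch (not part of the upstream file): the expected
 * init/set/destroy lifecycle for a caller building up a list of extents.
 * The function name and block numbers are made up for the example, and
 * intermediate error handling is abbreviated.
 */
#if 0	/* example only */
static int
xbitmap_example(void)
{
	struct xbitmap		bitmap;
	int			error;

	xbitmap_init(&bitmap);

	/* Record fsblocks 100-149 and 200-209. */
	error = xbitmap_set(&bitmap, 100, 50);
	if (!error)
		error = xbitmap_set(&bitmap, 200, 10);

	/* ...use the bitmap; xbitmap_hweight() would now return 60... */

	xbitmap_destroy(&bitmap);
	return error;
}
#endif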

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;

	for_each_xbitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kmem_free(bmr);
	}
}

/* Set up a per-AG block bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}

/* Compare two bitmap extents. */
static int
xbitmap_range_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xbitmap_range	*ap;
	struct xbitmap_range	*bp;

	ap = container_of(a, struct xbitmap_range, list);
	bp = container_of(b, struct xbitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure. The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct list_head	*lp;
	struct xbitmap_range	*br;
	struct xbitmap_range	*new_br;
	struct xbitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;

	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
	list_sort(NULL, &sub->list, xbitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list. We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub. The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead. In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xbitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersect or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kmem_free(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmem_alloc(sizeof(struct xbitmap_range),
					KM_MAYFAIL);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
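
/*
 * Illustrative sketch (not part of the upstream file): subtracting a small
 * extent from the middle of a larger one. The numbers and the function name
 * are made up, and return values are ignored for brevity.
 */
#if 0	/* example only */
static void
xbitmap_example_disunion(void)
{
	struct xbitmap		bitmap;
	struct xbitmap		sub;

	xbitmap_init(&bitmap);
	xbitmap_init(&sub);
	xbitmap_set(&bitmap, 64, 64);	/* blocks 64..127 */
	xbitmap_set(&sub, 96, 8);	/* blocks 96..103 */
	xbitmap_disunion(&bitmap, &sub);
	/* bitmap now holds [64, 96) and [104, 128); hweight is 56. */
	xbitmap_destroy(&sub);
	xbitmap_destroy(&bitmap);
}
#endif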

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree. Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records. For
 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
 * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
 * block 4. The list is [1, 4].
 *
 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
 * loop. The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2. Now
 * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
 * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
 * we add 3 to the list. Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor. Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xbitmap_set_btcur_path(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
		error = xbitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}

/* Collect a btree's block in the bitmap. */
STATIC int
xbitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xbitmap		*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
	return xbitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bmr, n, bitmap)
		ret += bmr->len;

	return ret;
}

/*
 * What follows is a second, later revision of this file: the scrub bitmap
 * code after it was rewritten to track extents in interval trees instead of
 * sorted lists, and split into u64 and u32 variants.
 */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>

/* u64 bitmap */

struct xbitmap64_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t		bn_start;

	/* Last set bit of this interval. */
	uint64_t		bn_last;

	/* Last set bit of this subtree. Do not touch this. */
	uint64_t		__bn_subtree_last;
};

/* Define our own interval tree type with uint64_t parameters. */

#define START(node) ((node)->bn_start)
#define LAST(node)  ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
			  uint64_t last);

static inline struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
			 uint64_t last);

INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap64_tree)

/* Iterate each interval of a bitmap. Do not change the bitmap. */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap64_node, bn_rbnode))

/* Clear a range of this bitmap. */
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*bn;
	struct xbitmap64_node	*new_bn;
	uint64_t		last = start + len - 1;

	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap64_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap64_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
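
/*
 * Illustrative sketch (not part of the upstream file): clearing bits out of
 * the middle of an interval splits it into two nodes. The numbers and the
 * function name are made up; return values are ignored for brevity.
 */
#if 0	/* example only */
static void
xbitmap64_example_split(void)
{
	struct xbitmap64	b;

	xbitmap64_init(&b);
	xbitmap64_set(&b, 10, 10);	/* one interval: bits 10..19 */
	xbitmap64_clear(&b, 14, 2);	/* two intervals: 10..13 and 16..19 */
	xbitmap64_destroy(&b);
}
#endif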

/* Set a range of this bitmap. */
int
xbitmap64_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*left;
	struct xbitmap64_node	*right;
	uint64_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap64_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap64_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap64_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap64_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
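
/*
 * Illustrative sketch (not part of the upstream file): setting a range that
 * is adjacent on both sides coalesces three runs into a single node. The
 * numbers and the function name are made up; return values are ignored for
 * brevity.
 */
#if 0	/* example only */
static void
xbitmap64_example_merge(void)
{
	struct xbitmap64	b;

	xbitmap64_init(&b);
	xbitmap64_set(&b, 0, 4);	/* bits 0..3 */
	xbitmap64_set(&b, 8, 4);	/* bits 0..3 and 8..11 */
	xbitmap64_set(&b, 4, 4);	/* merged: one interval, bits 0..11 */
	xbitmap64_destroy(&b);
}
#endif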

/* Free everything related to this bitmap. */
void
xbitmap64_destroy(
	struct xbitmap64	*bitmap)
{
	struct xbitmap64_node	*bn;

	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
		xbitmap64_tree_remove(bn, &bitmap->xb_root);
		kfree(bn);
	}
}

/* Set up a bitmap. */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure. The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
int
xbitmap64_disunion(
	struct xbitmap64	*bitmap,
	struct xbitmap64	*sub)
{
	struct xbitmap64_node	*bn;
	int			error;

	if (xbitmap64_empty(bitmap) || xbitmap64_empty(sub))
		return 0;

	for_each_xbitmap64_extent(bn, sub) {
		error = xbitmap64_clear(bitmap, bn->bn_start,
				bn->bn_last - bn->bn_start + 1);
		if (error)
			return error;
	}

	return 0;
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap64_hweight(
	struct xbitmap64	*bitmap)
{
	struct xbitmap64_node	*bn;
	uint64_t		ret = 0;

	for_each_xbitmap64_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap64_walk(
	struct xbitmap64	*bitmap,
	xbitmap64_walk_fn	fn,
	void			*priv)
{
	struct xbitmap64_node	*bn;
	int			error = 0;

	for_each_xbitmap64_extent(bn, bitmap) {
		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
		if (error)
			break;
	}

	return error;
}
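
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical
 * xbitmap64_walk_fn that sums the length of every set-bit run, producing
 * the same number as xbitmap64_hweight. The helper names are made up for
 * the example.
 */
#if 0	/* example only */
static int
xbitmap64_example_sum(
	uint64_t		start,
	uint64_t		len,
	void			*priv)
{
	uint64_t		*total = priv;

	*total += len;
	return 0;		/* returning nonzero stops the walk */
}

static uint64_t
xbitmap64_example_weight(
	struct xbitmap64	*bitmap)
{
	uint64_t		total = 0;

	xbitmap64_walk(bitmap, xbitmap64_example_sum, &total);
	return total;
}
#endif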

/* Does this bitmap have no bits set at all? */
bool
xbitmap64_empty(
	struct xbitmap64	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}

/* Is the start of the range set or clear? And for how long? */
bool
xbitmap64_test(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		*len)
{
	struct xbitmap64_node	*bn;
	uint64_t		last = start + *len - 1;

	bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	*len = bn->bn_start - start;
	return false;
}
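
/*
 * Illustrative sketch (not part of the upstream file): probing a bitmap
 * that holds bits 10..13 and 16..19. The function name and numbers are
 * made up; setup of *b is assumed.
 */
#if 0	/* example only */
static void
xbitmap64_example_probe(
	struct xbitmap64	*b)
{
	uint64_t		len;

	len = 100;
	if (xbitmap64_test(b, 12, &len)) {
		/* set run: len was trimmed to 2 (bits 12..13) */
	}

	len = 100;
	if (!xbitmap64_test(b, 14, &len)) {
		/* clear run: len is 2 (bits 14..15 are clear) */
	}
}
#endif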

/* u32 bitmap */

struct xbitmap32_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t		bn_start;

	/* Last set bit of this interval. */
	uint32_t		bn_last;

	/* Last set bit of this subtree. Do not touch this. */
	uint32_t		__bn_subtree_last;
};

/*
 * Define our own interval tree type with uint32_t parameters. The START
 * and LAST macros defined above work here too because the field names
 * match.
 */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
			  uint32_t last);

static inline struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
			 uint32_t last);

INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap32_tree)

/* Iterate each interval of a bitmap. Do not change the bitmap. */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap32_node, bn_rbnode))

/* Clear a range of this bitmap. */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*bn;
	struct xbitmap32_node	*new_bn;
	uint32_t		last = start + len - 1;

	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint32_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap32_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap32_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}

/* Set a range of this bitmap. */
int
xbitmap32_set(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*left;
	struct xbitmap32_node	*right;
	uint32_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap32_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap32_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap32_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap32_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}

/* Free everything related to this bitmap. */
void
xbitmap32_destroy(
	struct xbitmap32	*bitmap)
{
	struct xbitmap32_node	*bn;

	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, 0, -1U))) {
		xbitmap32_tree_remove(bn, &bitmap->xb_root);
		kfree(bn);
	}
}

/* Set up a bitmap. */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure. The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
int
xbitmap32_disunion(
	struct xbitmap32	*bitmap,
	struct xbitmap32	*sub)
{
	struct xbitmap32_node	*bn;
	int			error;

	if (xbitmap32_empty(bitmap) || xbitmap32_empty(sub))
		return 0;

	for_each_xbitmap32_extent(bn, sub) {
		error = xbitmap32_clear(bitmap, bn->bn_start,
				bn->bn_last - bn->bn_start + 1);
		if (error)
			return error;
	}

	return 0;
}

/* How many bits are set in this bitmap? */
uint32_t
xbitmap32_hweight(
	struct xbitmap32	*bitmap)
{
	struct xbitmap32_node	*bn;
	uint32_t		ret = 0;

	for_each_xbitmap32_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap32_walk(
	struct xbitmap32	*bitmap,
	xbitmap32_walk_fn	fn,
	void			*priv)
{
	struct xbitmap32_node	*bn;
	int			error = 0;

	for_each_xbitmap32_extent(bn, bitmap) {
		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
		if (error)
			break;
	}

	return error;
}

/* Does this bitmap have no bits set at all? */
bool
xbitmap32_empty(
	struct xbitmap32	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}

/* Is the start of the range set or clear? And for how long? */
bool
xbitmap32_test(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		*len)
{
	struct xbitmap32_node	*bn;
	uint32_t		last = start + *len - 1;

	bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	*len = bn->bn_start - start;
	return false;
}