1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "scrub/bitmap.h"
14
15/*
16 * Set a range of this bitmap. Caller must ensure the range is not set.
17 *
18 * This is the logical equivalent of bitmap |= mask(start, len).
19 */
20int
21xfs_bitmap_set(
22 struct xfs_bitmap *bitmap,
23 uint64_t start,
24 uint64_t len)
25{
26 struct xfs_bitmap_range *bmr;
27
28 bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
29 if (!bmr)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&bmr->list);
33 bmr->start = start;
34 bmr->len = len;
35 list_add_tail(&bmr->list, &bitmap->list);
36
37 return 0;
38}
39
40/* Free everything related to this bitmap. */
41void
42xfs_bitmap_destroy(
43 struct xfs_bitmap *bitmap)
44{
45 struct xfs_bitmap_range *bmr;
46 struct xfs_bitmap_range *n;
47
48 for_each_xfs_bitmap_extent(bmr, n, bitmap) {
49 list_del(&bmr->list);
50 kmem_free(bmr);
51 }
52}
53
/*
 * Initialize an empty bitmap.  (The structure is a bare extent list; it
 * carries no per-AG state despite historical naming elsewhere.)
 */
void
xfs_bitmap_init(
	struct xfs_bitmap	*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
61
62/* Compare two btree extents. */
63static int
64xfs_bitmap_range_cmp(
65 void *priv,
66 struct list_head *a,
67 struct list_head *b)
68{
69 struct xfs_bitmap_range *ap;
70 struct xfs_bitmap_range *bp;
71
72 ap = container_of(a, struct xfs_bitmap_range, list);
73 bp = container_of(b, struct xfs_bitmap_range, list);
74
75 if (ap->start > bp->start)
76 return 1;
77 if (ap->start < bp->start)
78 return -1;
79 return 0;
80}
81
82/*
83 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
84 *
85 * The intent is that callers will iterate the rmapbt for all of its records
86 * for a given owner to generate @bitmap; and iterate all the blocks of the
87 * metadata structures that are not being rebuilt and have the same rmapbt
88 * owner to generate @sub. This routine subtracts all the extents
89 * mentioned in sub from all the extents linked in @bitmap, which leaves
90 * @bitmap as the list of blocks that are not accounted for, which we assume
91 * are the dead blocks of the old metadata structure. The blocks mentioned in
92 * @bitmap can be reaped.
93 *
94 * This is the logical equivalent of bitmap &= ~sub.
95 */
96#define LEFT_ALIGNED (1 << 0)
97#define RIGHT_ALIGNED (1 << 1)
98int
99xfs_bitmap_disunion(
100 struct xfs_bitmap *bitmap,
101 struct xfs_bitmap *sub)
102{
103 struct list_head *lp;
104 struct xfs_bitmap_range *br;
105 struct xfs_bitmap_range *new_br;
106 struct xfs_bitmap_range *sub_br;
107 uint64_t sub_start;
108 uint64_t sub_len;
109 int state;
110 int error = 0;
111
112 if (list_empty(&bitmap->list) || list_empty(&sub->list))
113 return 0;
114 ASSERT(!list_empty(&sub->list));
115
116 list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
117 list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
118
119 /*
120 * Now that we've sorted both lists, we iterate bitmap once, rolling
121 * forward through sub and/or bitmap as necessary until we find an
122 * overlap or reach the end of either list. We do not reset lp to the
123 * head of bitmap nor do we reset sub_br to the head of sub. The
124 * list traversal is similar to merge sort, but we're deleting
125 * instead. In this manner we avoid O(n^2) operations.
126 */
127 sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
128 list);
129 lp = bitmap->list.next;
130 while (lp != &bitmap->list) {
131 br = list_entry(lp, struct xfs_bitmap_range, list);
132
133 /*
134 * Advance sub_br and/or br until we find a pair that
135 * intersect or we run out of extents.
136 */
137 while (sub_br->start + sub_br->len <= br->start) {
138 if (list_is_last(&sub_br->list, &sub->list))
139 goto out;
140 sub_br = list_next_entry(sub_br, list);
141 }
142 if (sub_br->start >= br->start + br->len) {
143 lp = lp->next;
144 continue;
145 }
146
147 /* trim sub_br to fit the extent we have */
148 sub_start = sub_br->start;
149 sub_len = sub_br->len;
150 if (sub_br->start < br->start) {
151 sub_len -= br->start - sub_br->start;
152 sub_start = br->start;
153 }
154 if (sub_len > br->len)
155 sub_len = br->len;
156
157 state = 0;
158 if (sub_start == br->start)
159 state |= LEFT_ALIGNED;
160 if (sub_start + sub_len == br->start + br->len)
161 state |= RIGHT_ALIGNED;
162 switch (state) {
163 case LEFT_ALIGNED:
164 /* Coincides with only the left. */
165 br->start += sub_len;
166 br->len -= sub_len;
167 break;
168 case RIGHT_ALIGNED:
169 /* Coincides with only the right. */
170 br->len -= sub_len;
171 lp = lp->next;
172 break;
173 case LEFT_ALIGNED | RIGHT_ALIGNED:
174 /* Total overlap, just delete ex. */
175 lp = lp->next;
176 list_del(&br->list);
177 kmem_free(br);
178 break;
179 case 0:
180 /*
181 * Deleting from the middle: add the new right extent
182 * and then shrink the left extent.
183 */
184 new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
185 KM_MAYFAIL);
186 if (!new_br) {
187 error = -ENOMEM;
188 goto out;
189 }
190 INIT_LIST_HEAD(&new_br->list);
191 new_br->start = sub_start + sub_len;
192 new_br->len = br->start + br->len - new_br->start;
193 list_add(&new_br->list, &br->list);
194 br->len = sub_start - br->start;
195 lp = lp->next;
196 break;
197 default:
198 ASSERT(0);
199 break;
200 }
201 }
202
203out:
204 return error;
205}
206#undef LEFT_ALIGNED
207#undef RIGHT_ALIGNED
208
209/*
210 * Record all btree blocks seen while iterating all records of a btree.
211 *
212 * We know that the btree query_all function starts at the left edge and walks
213 * towards the right edge of the tree. Therefore, we know that we can walk up
214 * the btree cursor towards the root; if the pointer for a given level points
215 * to the first record/key in that block, we haven't seen this block before;
216 * and therefore we need to remember that we saw this block in the btree.
217 *
218 * So if our btree is:
219 *
220 * 4
221 * / | \
222 * 1 2 3
223 *
224 * Pretend for this example that each leaf block has 100 btree records. For
225 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
226 * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
227 * block 4. The list is [1, 4].
228 *
229 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
230 * loop. The list remains [1, 4].
231 *
232 * For the 101st btree record, we've moved onto leaf block 2. Now
233 * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
234 * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
235 *
236 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
237 *
238 * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
239 * we add 3 to the list. Now it is [1, 4, 2, 3].
240 *
241 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
242 */
243
/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 *
 * Returns 0 or the -ENOMEM propagated from xfs_bitmap_set.
 */
int
xfs_bitmap_set_btcur_path(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	/*
	 * Walk up from the leaf.  A level whose ptr is 1 means the cursor
	 * just entered a block not seen before (see the traversal comment
	 * above); stop at the first level where that is not the case.
	 */
	for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		/* NOTE(review): presumably a NULL bp means the block lives
		 * in the inode fork rather than a buffer -- confirm. */
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
		error = xfs_bitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
271
272/* Collect a btree's block in the bitmap. */
273STATIC int
274xfs_bitmap_collect_btblock(
275 struct xfs_btree_cur *cur,
276 int level,
277 void *priv)
278{
279 struct xfs_bitmap *bitmap = priv;
280 struct xfs_buf *bp;
281 xfs_fsblock_t fsbno;
282
283 xfs_btree_get_block(cur, level, &bp);
284 if (!bp)
285 return 0;
286
287 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
288 return xfs_bitmap_set(bitmap, fsbno, 1);
289}
290
/*
 * Walk the btree and mark the bitmap wherever a btree block is found.
 *
 * Thin wrapper: visits every block of the btree under @cur and records each
 * one in @bitmap via the xfs_bitmap_collect_btblock callback.
 */
int
xfs_bitmap_set_btblocks(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_bit.h"
10#include "xfs_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_btree.h"
14#include "scrub/scrub.h"
15#include "scrub/bitmap.h"
16
17#include <linux/interval_tree_generic.h>
18
19/* u64 bitmap */
20
/* One maximal run of set bits, stored as an interval-tree node. */
struct xbitmap64_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t		bn_start;

	/* Last set bit of this interval (inclusive). */
	uint64_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint64_t		__bn_subtree_last;
};
33
/* Define our own interval tree type with uint64_t parameters. */

#define START(node) ((node)->bn_start)
#define LAST(node) ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
		uint64_t last);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
		uint64_t last);

/* Instantiate the tree ops; __bn_subtree_last is maintained by the macro. */
INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap64_tree)

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap64_node, bn_rbnode))
68
/*
 * Clear a range of this bitmap.
 *
 * Removes [@start, @start + @len) from the set: intervals partially
 * overlapping the range are trimmed, intervals wholly inside it are freed.
 * Returns -ENOMEM only when splitting an interval needs a new node.
 */
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*bn;
	struct xbitmap64_node	*new_bn;
	uint64_t		last = start + len - 1;

	/* Re-query from the root each pass because we mutate the tree. */
	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);

			/* add an extent for the tail we cut off */
			new_bn = kmalloc(sizeof(struct xbitmap64_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap64_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
			/* nothing overlapping can remain past @last */
			break;
		} else {
			/* interval lies entirely inside the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
117
/*
 * Set a range of this bitmap.
 *
 * Clears the range first, then re-inserts it as one interval, merging with
 * any abutting neighbors so the tree remains a set of maximal extents.
 * Returns -ENOMEM if node allocation fails.
 *
 * NOTE(review): the adjacency probes below compute @start - 1 and @last + 1,
 * which wrap at the ends of the u64 space; presumably callers never pass
 * ranges touching 0 or -1ULL -- confirm.
 */
int
xbitmap64_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*left;
	struct xbitmap64_node	*right;
	uint64_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap64_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap64_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap64_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap64_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
177
178/* Free everything related to this bitmap. */
179void
180xbitmap64_destroy(
181 struct xbitmap64 *bitmap)
182{
183 struct xbitmap64_node *bn;
184
185 while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
186 xbitmap64_tree_remove(bn, &bitmap->xb_root);
187 kfree(bn);
188 }
189}
190
/* Initialize an empty bitmap rooted in an empty cached rbtree. */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
198
199/*
200 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
201 *
202 * The intent is that callers will iterate the rmapbt for all of its records
203 * for a given owner to generate @bitmap; and iterate all the blocks of the
204 * metadata structures that are not being rebuilt and have the same rmapbt
205 * owner to generate @sub. This routine subtracts all the extents
206 * mentioned in sub from all the extents linked in @bitmap, which leaves
207 * @bitmap as the list of blocks that are not accounted for, which we assume
208 * are the dead blocks of the old metadata structure. The blocks mentioned in
209 * @bitmap can be reaped.
210 *
211 * This is the logical equivalent of bitmap &= ~sub.
212 */
213int
214xbitmap64_disunion(
215 struct xbitmap64 *bitmap,
216 struct xbitmap64 *sub)
217{
218 struct xbitmap64_node *bn;
219 int error;
220
221 if (xbitmap64_empty(bitmap) || xbitmap64_empty(sub))
222 return 0;
223
224 for_each_xbitmap64_extent(bn, sub) {
225 error = xbitmap64_clear(bitmap, bn->bn_start,
226 bn->bn_last - bn->bn_start + 1);
227 if (error)
228 return error;
229 }
230
231 return 0;
232}
233
234/* How many bits are set in this bitmap? */
235uint64_t
236xbitmap64_hweight(
237 struct xbitmap64 *bitmap)
238{
239 struct xbitmap64_node *bn;
240 uint64_t ret = 0;
241
242 for_each_xbitmap64_extent(bn, bitmap)
243 ret += bn->bn_last - bn->bn_start + 1;
244
245 return ret;
246}
247
248/* Call a function for every run of set bits in this bitmap. */
249int
250xbitmap64_walk(
251 struct xbitmap64 *bitmap,
252 xbitmap64_walk_fn fn,
253 void *priv)
254{
255 struct xbitmap64_node *bn;
256 int error = 0;
257
258 for_each_xbitmap64_extent(bn, bitmap) {
259 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
260 if (error)
261 break;
262 }
263
264 return error;
265}
266
/* Does this bitmap have no bits set at all? */
bool
xbitmap64_empty(
	struct xbitmap64	*bitmap)
{
	/* An empty interval tree has a NULL rbtree root node. */
	return bitmap->xb_root.rb_root.rb_node == NULL;
}
274
/*
 * Is the start of the range set or clear?  And for how long?
 *
 * Returns true if @start is set and trims *@len down to the length of the
 * set run beginning at @start (if the run ends before @start + *@len).
 * Returns false if @start is clear; *@len is trimmed to the distance to the
 * next set interval, or left unchanged if nothing in the range is set.
 */
bool
xbitmap64_test(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		*len)
{
	struct xbitmap64_node	*bn;
	uint64_t		last = start + *len - 1;

	bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		/* @start is inside this interval. */
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	/* Interval begins after @start, so @start itself is clear. */
	*len = bn->bn_start - start;
	return false;
}
296
297/* u32 bitmap */
298
/* One maximal run of set bits, stored as an interval-tree node. */
struct xbitmap32_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t		bn_start;

	/* Last set bit of this interval (inclusive). */
	uint32_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint32_t		__bn_subtree_last;
};
311
/*
 * Define our own interval tree type with uint32_t parameters.  The START
 * and LAST accessor macros from the u64 section above are reused here;
 * they work because both node types use the same field names.
 */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
		uint32_t last);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
		uint32_t last);

/* Instantiate the tree ops; __bn_subtree_last is maintained by the macro. */
INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap32_tree)

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap32_node, bn_rbnode))
343
/*
 * Clear a range of this bitmap.
 *
 * Removes [@start, @start + @len) from the set: intervals partially
 * overlapping the range are trimmed, intervals wholly inside it are freed.
 * Returns -ENOMEM only when splitting an interval needs a new node.
 */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*bn;
	struct xbitmap32_node	*new_bn;
	uint32_t		last = start + len - 1;

	/* Re-query from the root each pass because we mutate the tree. */
	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint32_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);

			/* add an extent for the tail we cut off */
			new_bn = kmalloc(sizeof(struct xbitmap32_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap32_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
			/* nothing overlapping can remain past @last */
			break;
		} else {
			/* interval lies entirely inside the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
392
/*
 * Set a range of this bitmap.
 *
 * Clears the range first, then re-inserts it as one interval, merging with
 * any abutting neighbors so the tree remains a set of maximal extents.
 * Returns -ENOMEM if node allocation fails.
 *
 * NOTE(review): the adjacency probes below compute @start - 1 and @last + 1,
 * which wrap at the ends of the u32 space; presumably callers never pass
 * ranges touching 0 or -1U -- confirm.
 */
int
xbitmap32_set(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*left;
	struct xbitmap32_node	*right;
	uint32_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap32_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap32_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap32_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap32_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
452
453/* Free everything related to this bitmap. */
454void
455xbitmap32_destroy(
456 struct xbitmap32 *bitmap)
457{
458 struct xbitmap32_node *bn;
459
460 while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, 0, -1U))) {
461 xbitmap32_tree_remove(bn, &bitmap->xb_root);
462 kfree(bn);
463 }
464}
465
/* Initialize an empty bitmap rooted in an empty cached rbtree. */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
473
474/*
475 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
476 *
477 * The intent is that callers will iterate the rmapbt for all of its records
478 * for a given owner to generate @bitmap; and iterate all the blocks of the
479 * metadata structures that are not being rebuilt and have the same rmapbt
480 * owner to generate @sub. This routine subtracts all the extents
481 * mentioned in sub from all the extents linked in @bitmap, which leaves
482 * @bitmap as the list of blocks that are not accounted for, which we assume
483 * are the dead blocks of the old metadata structure. The blocks mentioned in
484 * @bitmap can be reaped.
485 *
486 * This is the logical equivalent of bitmap &= ~sub.
487 */
488int
489xbitmap32_disunion(
490 struct xbitmap32 *bitmap,
491 struct xbitmap32 *sub)
492{
493 struct xbitmap32_node *bn;
494 int error;
495
496 if (xbitmap32_empty(bitmap) || xbitmap32_empty(sub))
497 return 0;
498
499 for_each_xbitmap32_extent(bn, sub) {
500 error = xbitmap32_clear(bitmap, bn->bn_start,
501 bn->bn_last - bn->bn_start + 1);
502 if (error)
503 return error;
504 }
505
506 return 0;
507}
508
509/* How many bits are set in this bitmap? */
510uint32_t
511xbitmap32_hweight(
512 struct xbitmap32 *bitmap)
513{
514 struct xbitmap32_node *bn;
515 uint32_t ret = 0;
516
517 for_each_xbitmap32_extent(bn, bitmap)
518 ret += bn->bn_last - bn->bn_start + 1;
519
520 return ret;
521}
522
523/* Call a function for every run of set bits in this bitmap. */
524int
525xbitmap32_walk(
526 struct xbitmap32 *bitmap,
527 xbitmap32_walk_fn fn,
528 void *priv)
529{
530 struct xbitmap32_node *bn;
531 int error = 0;
532
533 for_each_xbitmap32_extent(bn, bitmap) {
534 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
535 if (error)
536 break;
537 }
538
539 return error;
540}
541
/* Does this bitmap have no bits set at all? */
bool
xbitmap32_empty(
	struct xbitmap32	*bitmap)
{
	/* An empty interval tree has a NULL rbtree root node. */
	return bitmap->xb_root.rb_root.rb_node == NULL;
}
549
/*
 * Is the start of the range set or clear?  And for how long?
 *
 * Returns true if @start is set and trims *@len down to the length of the
 * set run beginning at @start (if the run ends before @start + *@len).
 * Returns false if @start is clear; *@len is trimmed to the distance to the
 * next set interval, or left unchanged if nothing in the range is set.
 */
bool
xbitmap32_test(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		*len)
{
	struct xbitmap32_node	*bn;
	uint32_t		last = start + *len - 1;

	bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		/* @start is inside this interval. */
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	/* Interval begins after @start, so @start itself is clear. */
	*len = bn->bn_start - start;
	return false;
}
571
572/* Count the number of set regions in this bitmap. */
573uint32_t
574xbitmap32_count_set_regions(
575 struct xbitmap32 *bitmap)
576{
577 struct xbitmap32_node *bn;
578 uint32_t nr = 0;
579
580 for_each_xbitmap32_extent(bn, bitmap)
581 nr++;
582
583 return nr;
584}