1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "scrub/bitmap.h"
14
15/*
16 * Set a range of this bitmap. Caller must ensure the range is not set.
17 *
18 * This is the logical equivalent of bitmap |= mask(start, len).
19 */
20int
21xbitmap_set(
22 struct xbitmap *bitmap,
23 uint64_t start,
24 uint64_t len)
25{
26 struct xbitmap_range *bmr;
27
28 bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
29 if (!bmr)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&bmr->list);
33 bmr->start = start;
34 bmr->len = len;
35 list_add_tail(&bmr->list, &bitmap->list);
36
37 return 0;
38}
39
40/* Free everything related to this bitmap. */
41void
42xbitmap_destroy(
43 struct xbitmap *bitmap)
44{
45 struct xbitmap_range *bmr;
46 struct xbitmap_range *n;
47
48 for_each_xbitmap_extent(bmr, n, bitmap) {
49 list_del(&bmr->list);
50 kmem_free(bmr);
51 }
52}
53
/*
 * Initialize an empty bitmap.  Callers typically use this to track per-AG
 * blocks, though the structure itself just holds 64-bit extent ranges.
 */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
61
62/* Compare two btree extents. */
63static int
64xbitmap_range_cmp(
65 void *priv,
66 const struct list_head *a,
67 const struct list_head *b)
68{
69 struct xbitmap_range *ap;
70 struct xbitmap_range *bp;
71
72 ap = container_of(a, struct xbitmap_range, list);
73 bp = container_of(b, struct xbitmap_range, list);
74
75 if (ap->start > bp->start)
76 return 1;
77 if (ap->start < bp->start)
78 return -1;
79 return 0;
80}
81
82/*
83 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
84 *
85 * The intent is that callers will iterate the rmapbt for all of its records
86 * for a given owner to generate @bitmap; and iterate all the blocks of the
87 * metadata structures that are not being rebuilt and have the same rmapbt
88 * owner to generate @sub. This routine subtracts all the extents
89 * mentioned in sub from all the extents linked in @bitmap, which leaves
90 * @bitmap as the list of blocks that are not accounted for, which we assume
91 * are the dead blocks of the old metadata structure. The blocks mentioned in
92 * @bitmap can be reaped.
93 *
94 * This is the logical equivalent of bitmap &= ~sub.
95 */
96#define LEFT_ALIGNED (1 << 0)
97#define RIGHT_ALIGNED (1 << 1)
98int
99xbitmap_disunion(
100 struct xbitmap *bitmap,
101 struct xbitmap *sub)
102{
103 struct list_head *lp;
104 struct xbitmap_range *br;
105 struct xbitmap_range *new_br;
106 struct xbitmap_range *sub_br;
107 uint64_t sub_start;
108 uint64_t sub_len;
109 int state;
110 int error = 0;
111
112 if (list_empty(&bitmap->list) || list_empty(&sub->list))
113 return 0;
114 ASSERT(!list_empty(&sub->list));
115
116 list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
117 list_sort(NULL, &sub->list, xbitmap_range_cmp);
118
119 /*
120 * Now that we've sorted both lists, we iterate bitmap once, rolling
121 * forward through sub and/or bitmap as necessary until we find an
122 * overlap or reach the end of either list. We do not reset lp to the
123 * head of bitmap nor do we reset sub_br to the head of sub. The
124 * list traversal is similar to merge sort, but we're deleting
125 * instead. In this manner we avoid O(n^2) operations.
126 */
127 sub_br = list_first_entry(&sub->list, struct xbitmap_range,
128 list);
129 lp = bitmap->list.next;
130 while (lp != &bitmap->list) {
131 br = list_entry(lp, struct xbitmap_range, list);
132
133 /*
134 * Advance sub_br and/or br until we find a pair that
135 * intersect or we run out of extents.
136 */
137 while (sub_br->start + sub_br->len <= br->start) {
138 if (list_is_last(&sub_br->list, &sub->list))
139 goto out;
140 sub_br = list_next_entry(sub_br, list);
141 }
142 if (sub_br->start >= br->start + br->len) {
143 lp = lp->next;
144 continue;
145 }
146
147 /* trim sub_br to fit the extent we have */
148 sub_start = sub_br->start;
149 sub_len = sub_br->len;
150 if (sub_br->start < br->start) {
151 sub_len -= br->start - sub_br->start;
152 sub_start = br->start;
153 }
154 if (sub_len > br->len)
155 sub_len = br->len;
156
157 state = 0;
158 if (sub_start == br->start)
159 state |= LEFT_ALIGNED;
160 if (sub_start + sub_len == br->start + br->len)
161 state |= RIGHT_ALIGNED;
162 switch (state) {
163 case LEFT_ALIGNED:
164 /* Coincides with only the left. */
165 br->start += sub_len;
166 br->len -= sub_len;
167 break;
168 case RIGHT_ALIGNED:
169 /* Coincides with only the right. */
170 br->len -= sub_len;
171 lp = lp->next;
172 break;
173 case LEFT_ALIGNED | RIGHT_ALIGNED:
174 /* Total overlap, just delete ex. */
175 lp = lp->next;
176 list_del(&br->list);
177 kmem_free(br);
178 break;
179 case 0:
180 /*
181 * Deleting from the middle: add the new right extent
182 * and then shrink the left extent.
183 */
184 new_br = kmem_alloc(sizeof(struct xbitmap_range),
185 KM_MAYFAIL);
186 if (!new_br) {
187 error = -ENOMEM;
188 goto out;
189 }
190 INIT_LIST_HEAD(&new_br->list);
191 new_br->start = sub_start + sub_len;
192 new_br->len = br->start + br->len - new_br->start;
193 list_add(&new_br->list, &br->list);
194 br->len = sub_start - br->start;
195 lp = lp->next;
196 break;
197 default:
198 ASSERT(0);
199 break;
200 }
201 }
202
203out:
204 return error;
205}
206#undef LEFT_ALIGNED
207#undef RIGHT_ALIGNED
208
209/*
210 * Record all btree blocks seen while iterating all records of a btree.
211 *
212 * We know that the btree query_all function starts at the left edge and walks
213 * towards the right edge of the tree. Therefore, we know that we can walk up
214 * the btree cursor towards the root; if the pointer for a given level points
215 * to the first record/key in that block, we haven't seen this block before;
216 * and therefore we need to remember that we saw this block in the btree.
217 *
218 * So if our btree is:
219 *
220 * 4
221 * / | \
222 * 1 2 3
223 *
224 * Pretend for this example that each leaf block has 100 btree records. For
225 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
226 * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
227 * block 4. The list is [1, 4].
228 *
229 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
230 * loop. The list remains [1, 4].
231 *
232 * For the 101st btree record, we've moved onto leaf block 2. Now
233 * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
234 * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
235 *
236 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
237 *
238 * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
239 * we add 3 to the list. Now it is [1, 4, 2, 3].
240 *
241 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
242 */
243
244/*
245 * Record all the buffers pointed to by the btree cursor. Callers already
246 * engaged in a btree walk should call this function to capture the list of
247 * blocks going from the leaf towards the root.
248 */
249int
250xbitmap_set_btcur_path(
251 struct xbitmap *bitmap,
252 struct xfs_btree_cur *cur)
253{
254 struct xfs_buf *bp;
255 xfs_fsblock_t fsb;
256 int i;
257 int error;
258
259 for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
260 xfs_btree_get_block(cur, i, &bp);
261 if (!bp)
262 continue;
263 fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
264 error = xbitmap_set(bitmap, fsb, 1);
265 if (error)
266 return error;
267 }
268
269 return 0;
270}
271
272/* Collect a btree's block in the bitmap. */
273STATIC int
274xbitmap_collect_btblock(
275 struct xfs_btree_cur *cur,
276 int level,
277 void *priv)
278{
279 struct xbitmap *bitmap = priv;
280 struct xfs_buf *bp;
281 xfs_fsblock_t fsbno;
282
283 xfs_btree_get_block(cur, level, &bp);
284 if (!bp)
285 return 0;
286
287 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
288 return xbitmap_set(bitmap, fsbno, 1);
289}
290
/*
 * Walk the btree and mark the bitmap wherever a btree block is found.
 * Thin wrapper around xfs_btree_visit_blocks with xbitmap_collect_btblock
 * as the per-block callback; returns whatever the walk returns.
 */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}
300
301/* How many bits are set in this bitmap? */
302uint64_t
303xbitmap_hweight(
304 struct xbitmap *bitmap)
305{
306 struct xbitmap_range *bmr;
307 struct xbitmap_range *n;
308 uint64_t ret = 0;
309
310 for_each_xbitmap_extent(bmr, n, bitmap)
311 ret += bmr->len;
312
313 return ret;
314}
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "scrub/scrub.h"
14#include "scrub/bitmap.h"
15
16/*
17 * Set a range of this bitmap. Caller must ensure the range is not set.
18 *
19 * This is the logical equivalent of bitmap |= mask(start, len).
20 */
21int
22xbitmap_set(
23 struct xbitmap *bitmap,
24 uint64_t start,
25 uint64_t len)
26{
27 struct xbitmap_range *bmr;
28
29 bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
30 if (!bmr)
31 return -ENOMEM;
32
33 INIT_LIST_HEAD(&bmr->list);
34 bmr->start = start;
35 bmr->len = len;
36 list_add_tail(&bmr->list, &bitmap->list);
37
38 return 0;
39}
40
41/* Free everything related to this bitmap. */
42void
43xbitmap_destroy(
44 struct xbitmap *bitmap)
45{
46 struct xbitmap_range *bmr;
47 struct xbitmap_range *n;
48
49 for_each_xbitmap_extent(bmr, n, bitmap) {
50 list_del(&bmr->list);
51 kfree(bmr);
52 }
53}
54
/*
 * Initialize an empty bitmap.  Callers typically use this to track per-AG
 * blocks, though the structure itself just holds 64-bit extent ranges.
 */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
62
63/* Compare two btree extents. */
64static int
65xbitmap_range_cmp(
66 void *priv,
67 const struct list_head *a,
68 const struct list_head *b)
69{
70 struct xbitmap_range *ap;
71 struct xbitmap_range *bp;
72
73 ap = container_of(a, struct xbitmap_range, list);
74 bp = container_of(b, struct xbitmap_range, list);
75
76 if (ap->start > bp->start)
77 return 1;
78 if (ap->start < bp->start)
79 return -1;
80 return 0;
81}
82
83/*
84 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
85 *
86 * The intent is that callers will iterate the rmapbt for all of its records
87 * for a given owner to generate @bitmap; and iterate all the blocks of the
88 * metadata structures that are not being rebuilt and have the same rmapbt
89 * owner to generate @sub. This routine subtracts all the extents
90 * mentioned in sub from all the extents linked in @bitmap, which leaves
91 * @bitmap as the list of blocks that are not accounted for, which we assume
92 * are the dead blocks of the old metadata structure. The blocks mentioned in
93 * @bitmap can be reaped.
94 *
95 * This is the logical equivalent of bitmap &= ~sub.
96 */
97#define LEFT_ALIGNED (1 << 0)
98#define RIGHT_ALIGNED (1 << 1)
99int
100xbitmap_disunion(
101 struct xbitmap *bitmap,
102 struct xbitmap *sub)
103{
104 struct list_head *lp;
105 struct xbitmap_range *br;
106 struct xbitmap_range *new_br;
107 struct xbitmap_range *sub_br;
108 uint64_t sub_start;
109 uint64_t sub_len;
110 int state;
111 int error = 0;
112
113 if (list_empty(&bitmap->list) || list_empty(&sub->list))
114 return 0;
115 ASSERT(!list_empty(&sub->list));
116
117 list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
118 list_sort(NULL, &sub->list, xbitmap_range_cmp);
119
120 /*
121 * Now that we've sorted both lists, we iterate bitmap once, rolling
122 * forward through sub and/or bitmap as necessary until we find an
123 * overlap or reach the end of either list. We do not reset lp to the
124 * head of bitmap nor do we reset sub_br to the head of sub. The
125 * list traversal is similar to merge sort, but we're deleting
126 * instead. In this manner we avoid O(n^2) operations.
127 */
128 sub_br = list_first_entry(&sub->list, struct xbitmap_range,
129 list);
130 lp = bitmap->list.next;
131 while (lp != &bitmap->list) {
132 br = list_entry(lp, struct xbitmap_range, list);
133
134 /*
135 * Advance sub_br and/or br until we find a pair that
136 * intersect or we run out of extents.
137 */
138 while (sub_br->start + sub_br->len <= br->start) {
139 if (list_is_last(&sub_br->list, &sub->list))
140 goto out;
141 sub_br = list_next_entry(sub_br, list);
142 }
143 if (sub_br->start >= br->start + br->len) {
144 lp = lp->next;
145 continue;
146 }
147
148 /* trim sub_br to fit the extent we have */
149 sub_start = sub_br->start;
150 sub_len = sub_br->len;
151 if (sub_br->start < br->start) {
152 sub_len -= br->start - sub_br->start;
153 sub_start = br->start;
154 }
155 if (sub_len > br->len)
156 sub_len = br->len;
157
158 state = 0;
159 if (sub_start == br->start)
160 state |= LEFT_ALIGNED;
161 if (sub_start + sub_len == br->start + br->len)
162 state |= RIGHT_ALIGNED;
163 switch (state) {
164 case LEFT_ALIGNED:
165 /* Coincides with only the left. */
166 br->start += sub_len;
167 br->len -= sub_len;
168 break;
169 case RIGHT_ALIGNED:
170 /* Coincides with only the right. */
171 br->len -= sub_len;
172 lp = lp->next;
173 break;
174 case LEFT_ALIGNED | RIGHT_ALIGNED:
175 /* Total overlap, just delete ex. */
176 lp = lp->next;
177 list_del(&br->list);
178 kfree(br);
179 break;
180 case 0:
181 /*
182 * Deleting from the middle: add the new right extent
183 * and then shrink the left extent.
184 */
185 new_br = kmalloc(sizeof(struct xbitmap_range),
186 XCHK_GFP_FLAGS);
187 if (!new_br) {
188 error = -ENOMEM;
189 goto out;
190 }
191 INIT_LIST_HEAD(&new_br->list);
192 new_br->start = sub_start + sub_len;
193 new_br->len = br->start + br->len - new_br->start;
194 list_add(&new_br->list, &br->list);
195 br->len = sub_start - br->start;
196 lp = lp->next;
197 break;
198 default:
199 ASSERT(0);
200 break;
201 }
202 }
203
204out:
205 return error;
206}
207#undef LEFT_ALIGNED
208#undef RIGHT_ALIGNED
209
210/*
211 * Record all btree blocks seen while iterating all records of a btree.
212 *
213 * We know that the btree query_all function starts at the left edge and walks
214 * towards the right edge of the tree. Therefore, we know that we can walk up
215 * the btree cursor towards the root; if the pointer for a given level points
216 * to the first record/key in that block, we haven't seen this block before;
217 * and therefore we need to remember that we saw this block in the btree.
218 *
219 * So if our btree is:
220 *
221 * 4
222 * / | \
223 * 1 2 3
224 *
225 * Pretend for this example that each leaf block has 100 btree records. For
226 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
227 * record that we saw block 1. Then we observe that bc_levels[1].ptr == 1, so
228 * we record block 4. The list is [1, 4].
229 *
230 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
231 * the loop. The list remains [1, 4].
232 *
233 * For the 101st btree record, we've moved onto leaf block 2. Now
234 * bc_levels[0].ptr == 1 again, so we record that we saw block 2. We see that
235 * bc_levels[1].ptr == 2, so we exit the loop. The list is now [1, 4, 2].
236 *
237 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
238 *
239 * For the 201st record, we've moved on to leaf block 3.
240 * bc_levels[0].ptr == 1, so we add 3 to the list. Now it is [1, 4, 2, 3].
241 *
242 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
243 */
244
245/*
246 * Record all the buffers pointed to by the btree cursor. Callers already
247 * engaged in a btree walk should call this function to capture the list of
248 * blocks going from the leaf towards the root.
249 */
250int
251xbitmap_set_btcur_path(
252 struct xbitmap *bitmap,
253 struct xfs_btree_cur *cur)
254{
255 struct xfs_buf *bp;
256 xfs_fsblock_t fsb;
257 int i;
258 int error;
259
260 for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
261 xfs_btree_get_block(cur, i, &bp);
262 if (!bp)
263 continue;
264 fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
265 error = xbitmap_set(bitmap, fsb, 1);
266 if (error)
267 return error;
268 }
269
270 return 0;
271}
272
273/* Collect a btree's block in the bitmap. */
274STATIC int
275xbitmap_collect_btblock(
276 struct xfs_btree_cur *cur,
277 int level,
278 void *priv)
279{
280 struct xbitmap *bitmap = priv;
281 struct xfs_buf *bp;
282 xfs_fsblock_t fsbno;
283
284 xfs_btree_get_block(cur, level, &bp);
285 if (!bp)
286 return 0;
287
288 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
289 return xbitmap_set(bitmap, fsbno, 1);
290}
291
/*
 * Walk the btree and mark the bitmap wherever a btree block is found.
 * Thin wrapper around xfs_btree_visit_blocks with xbitmap_collect_btblock
 * as the per-block callback; returns whatever the walk returns.
 */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}
301
302/* How many bits are set in this bitmap? */
303uint64_t
304xbitmap_hweight(
305 struct xbitmap *bitmap)
306{
307 struct xbitmap_range *bmr;
308 struct xbitmap_range *n;
309 uint64_t ret = 0;
310
311 for_each_xbitmap_extent(bmr, n, bitmap)
312 ret += bmr->len;
313
314 return ret;
315}