// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

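/*
 * Slab cache for refcount btree cursors; created at module init time and
 * sized for the tallest possible on-disk tree.
 */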
static struct kmem_cache	*xfs_refcountbt_cur_cache;

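/* Duplicate a refcount btree cursor for the same transaction, AGF, and AG. */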
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag);
}

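/*
 * Point the AGF at a new btree root block and adjust the tree height by
 * @inc, keeping the in-core perag copy of the height in sync and logging
 * the changed AGF fields.
 */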
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = agbp->b_pag;

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

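/*
 * Allocate a new btree block from the AG's metadata reservation, aiming for
 * a spot near the original refcount btree root block.  *stat is set to zero
 * if no block could be found.
 */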
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.pag = cur->bc_ag.pag;
	args.oinfo = XFS_RMAP_OINFO_REFC;
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent_near_bno(&args,
			XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno,
					xfs_refc_block(args.mp)));
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

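/*
 * Give a btree block back to the AG's metadata reservation.  The AGF block
 * count is updated immediately; the extent itself is freed via a deferred
 * work item.
 */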
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
			&XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA, false);
}

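/* Return the minimum number of records for a leaf (level 0) or node block. */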
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

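/* Return the maximum number of records for a leaf (level 0) or node block. */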
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

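/* Derive the low key of a record: the first block of the extent. */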
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

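/* Derive the high key of a record: the last block of the extent. */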
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

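/*
 * Convert the in-core record in the cursor to on-disk form, folding the
 * refcount domain into the high bit of the start block.
 */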
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
	uint32_t		start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	rec->refc.rc_startblock = cpu_to_be32(start);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

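/* Initialize a pointer to the btree root block as recorded in the AGF. */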
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}

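/*
 * Compute @key minus the lookup record in the cursor, using the same
 * startblock encoding as the on-disk records.
 */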
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	const struct xfs_refcount_key	*kp = &key->refc;
	const struct xfs_refcount_irec	*irec = &cur->bc_rec.rc;
	uint32_t			start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
}

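/*
 * Compute @k1 minus @k2.  The mask, if set, must cover the startblock,
 * which is the only field in a refcount btree key.
 */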
STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			be32_to_cpu(k2->refc.rc_startblock);
}

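/*
 * Structure verifier: check the magic, the reflink feature bit, and the v5
 * block header, then make sure the block's level fits within the possible
 * height of the tree.
 */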
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_reflink(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && xfs_perag_initialised_agf(pag)) {
		unsigned int	maxlevel = pag->pagf_refcount_level;

#ifdef CONFIG_XFS_ONLINE_REPAIR
		/*
		 * Online repair could be rewriting the refcount btree, so
		 * we'll validate against the larger of either tree while this
		 * is going on.
		 */
		maxlevel = max_t(unsigned int, maxlevel,
				pag->pagf_repair_refcount_level);
#endif
		if (level >= maxlevel)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

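/* Read verifier: check the CRC first, then the block structure. */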
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

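/*
 * Write verifier: check the block structure, then compute the CRC just
 * before the buffer is written to disk.
 */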
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.magic			= { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

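/* Check that two keys are in ascending startblock order. */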
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

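/* Check that two records are in order and do not overlap. */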
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

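/*
 * Report whether the block ranges described by two keys abut each other,
 * for keyspace scans.  The mask, if set, must cover the startblock.
 */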
STATIC enum xbtree_key_contig
xfs_refcountbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
				 be32_to_cpu(key2->refc.rc_startblock));
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
	.keys_contiguous	= xfs_refcountbt_keys_contiguous,
};

/*
 * Initialize a new refcount btree cursor.
 */
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);

	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
			mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.refc.nr_ops = 0;
	cur->bc_ag.refc.shape_changes = 0;
	cur->bc_ops = &xfs_refcountbt_ops;
	return cur;
}

/* Create a btree cursor. */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Swap in the new btree root.  Once we pass this point the newly rebuilt
 * btree is in place and we have to kill off all the old btree blocks.
 */
void
xfs_refcountbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_refcount_root = cpu_to_be32(afake->af_root);
	agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
	agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
				    XFS_AGF_REFCOUNT_ROOT |
				    XFS_AGF_REFCOUNT_LEVEL);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
}

/* Calculate number of records in a refcount btree block. */
static inline unsigned int
xfs_refcountbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;
	return xfs_refcountbt_block_maxrecs(blocklen, leaf);
}

/* Compute the max possible height of the maximally sized refcount btree. */
unsigned int
xfs_refcountbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_refcountbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_refcountbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	if (!xfs_has_reflink(mp)) {
		mp->m_refc_maxlevels = 0;
		return;
	}

	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
	ASSERT(mp->m_refc_maxlevels <= xfs_refcountbt_maxlevels_ondisk());
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_reflink(mp))
		return 0;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}

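/* Set up the cursor cache, sized for the tallest possible on-disk tree. */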
int __init
xfs_refcountbt_init_cur_cache(void)
{
	xfs_refcountbt_cur_cache = kmem_cache_create("xfs_refcbt_cur",
			xfs_btree_cur_sizeof(xfs_refcountbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_refcountbt_cur_cache)
		return -ENOMEM;
	return 0;
}

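/* Tear down the cursor cache. */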
void
xfs_refcountbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_refcountbt_cur_cache);
	xfs_refcountbt_cur_cache = NULL;
}