v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"


STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	int64_t			diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			  be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	int64_t			diff;

	diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return  be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}
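
/*
 * Editor's note (not part of the original source): the ->key_diff and
 * ->diff_two_keys callbacks above only need to be sign-correct --
 * negative when the first operand sorts before the second, zero on
 * equality, positive otherwise.  For the by-size (cnt) btree this means
 * ordering by (blockcount, startblock): the startblock comparison is
 * reached only when the extent lengths tie.
 */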

static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_ag_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}
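
/*
 * Editor's sketch (not part of the original source): a typical caller
 * pins the AGF, builds a cursor, and tears it down when done.  The
 * helper names exist in this era of the code, but the exact signatures
 * used here are assumptions:
 *
 *	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
 *	if (!error) {
 *		cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *		error = xfs_alloc_lookup_ge(cur, bno, len, &stat);
 *		xfs_btree_del_cursor(cur, error);
 *	}
 */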

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}
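
As a standalone illustration of the cntbt ordering implemented by
xfs_cntbt_diff_two_keys above, here is a minimal userspace sketch: records
sort by extent length first and fall back to the starting block on a tie.
The struct name and the values are illustrative, not kernel types.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the cntbt (blockcount, startblock) key. */
struct cnt_key {
	uint32_t	blockcount;
	uint32_t	startblock;
};

/* Sign-correct comparison mirroring xfs_cntbt_diff_two_keys(). */
static int64_t
cnt_key_cmp(const struct cnt_key *k1, const struct cnt_key *k2)
{
	int64_t	diff = (int64_t)k1->blockcount - k2->blockcount;

	if (diff)
		return diff;
	return (int64_t)k1->startblock - k2->startblock;
}

int main(void)
{
	struct cnt_key	a = { .blockcount = 8, .startblock = 100 };
	struct cnt_key	b = { .blockcount = 8, .startblock = 50 };

	/* Equal lengths, so ordering falls back to startblock: a sorts after b. */
	printf("%lld\n", (long long)cnt_key_cmp(&a, &b));
	return 0;
}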
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"


STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	int64_t			diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			  be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	int64_t			diff;

	diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return  be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_ag_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
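
/*
 * Editor's note (not part of the original source): compare this with the
 * v5.14.15 listing above.  The (agbp, agno) pair in cur->bc_private.a was
 * later replaced by a reference-counted struct xfs_perag in cur->bc_ag,
 * and the shared setup moved into xfs_allocbt_init_common() so that the
 * staging cursor (xfs_allocbt_stage_cursor) could reuse it.
 */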

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}
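
The record-capacity arithmetic in xfs_allocbt_maxrecs() is identical in both
versions: subtract the block header from the block length, then divide by the
record size for a leaf block or by the combined key-plus-pointer size for a
node block.  A minimal sketch of that arithmetic follows; the header, record,
key, and pointer sizes are assumptions for the example (XFS_ALLOC_BLOCK_LEN
really depends on whether the filesystem has CRCs enabled), not values read
from a filesystem.

#include <stdio.h>

/*
 * Assumed sizes for illustration only: an alloc record and an alloc key
 * are each two 32-bit fields, a sibling/child pointer is one 32-bit AG
 * block number, and the block header is the 56-byte v5 short form.
 */
#define EX_BLOCK_HDR_LEN	56
#define EX_REC_LEN		8
#define EX_KEY_LEN		8
#define EX_PTR_LEN		4

/* Mirror of the xfs_allocbt_maxrecs() computation. */
static int
allocbt_maxrecs(int blocklen, int leaf)
{
	blocklen -= EX_BLOCK_HDR_LEN;

	if (leaf)
		return blocklen / EX_REC_LEN;
	return blocklen / (EX_KEY_LEN + EX_PTR_LEN);
}

int main(void)
{
	/*
	 * A 4096-byte block: (4096 - 56) / 8 = 505 leaf records,
	 * (4096 - 56) / 12 = 336 node entries.
	 */
	printf("leaf=%d node=%d\n",
	       allocbt_maxrecs(4096, 1), allocbt_maxrecs(4096, 0));
	return 0;
}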