/* v3.15 */
  1/*
  2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_shared.h"
 21#include "xfs_format.h"
 22#include "xfs_log_format.h"
 23#include "xfs_trans_resv.h"
 24#include "xfs_sb.h"
 25#include "xfs_ag.h"
 26#include "xfs_mount.h"
 27#include "xfs_btree.h"
 28#include "xfs_alloc_btree.h"
 
 
 
 
 29#include "xfs_alloc.h"
 30#include "xfs_extent_busy.h"
 31#include "xfs_error.h"
 32#include "xfs_trace.h"
 33#include "xfs_cksum.h"
 34#include "xfs_trans.h"
 35
 36
/*
 * Duplicate an allocation btree cursor: create a fresh cursor over the
 * same mount, transaction, AGF buffer, AG and btree type as @cur.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}
 45
/*
 * Record a new btree root block in the AGF and adjust the tree height
 * by @inc (+1 when the root splits, -1 when it is joined away).  Both
 * the on-disk AGF and the in-core perag copy are updated, then the
 * changed AGF fields are logged.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	/* Block 0 holds the AG headers; a root there would be corruption. */
	ASSERT(ptr->s != 0);

	/* ptr->s is already big-endian, so it is stored without conversion. */
	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
 67
/*
 * Allocate a new block for the btree from this AG's freelist.
 *
 * @start and @length are unused here: allocbt blocks are always taken
 * one at a time from the AGFL.  On success *stat is 1 and new->s holds
 * the new AG block number (big-endian); if the freelist is empty,
 * *stat is 0 and 0 is returned.  Returns an error code on failure.
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		/* Empty freelist: report "no block available", not an error. */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Clear any busy-extent state on the block before reusing it. */
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
104
/*
 * Free a btree block: return it to the AG freelist, mark the extent
 * busy so it cannot be reallocated before this transaction commits,
 * and invalidate the buffer so stale contents are never written back.
 */
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	/*
	 * SKIP_DISCARD: presumably freelist blocks are reused quickly
	 * enough that issuing a discard for them is not worthwhile.
	 */
	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	xfs_trans_binval(cur->bc_tp, bp);
	return 0;
}
127
/*
 * Update the longest extent in the AGF
 *
 * The cntbt is sorted by extent length, so the last record of the last
 * leaf block is the longest free extent in the AG.  Whenever that
 * record changes, mirror its length into the on-disk agf_longest and
 * the in-core pagf_longest, and log the AGF.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,		/* 1-based index of rec in block */
	int			reason)		/* LASTREC_* event code */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;		/* new longest, on-disk endian */
	int			numrecs;

	/* Only the by-size btree tracks the longest extent. */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* An inserted record only matters if it beats the current longest. */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/* Only deletion of the record past the new end matters. */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			/* New longest is the last remaining record. */
			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	/* len is big-endian, matching the on-disk agf_longest field. */
	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
190
191STATIC int
192xfs_allocbt_get_minrecs(
193	struct xfs_btree_cur	*cur,
194	int			level)
195{
196	return cur->bc_mp->m_alloc_mnr[level != 0];
197}
198
199STATIC int
200xfs_allocbt_get_maxrecs(
201	struct xfs_btree_cur	*cur,
202	int			level)
203{
204	return cur->bc_mp->m_alloc_mxr[level != 0];
205}
206
207STATIC void
208xfs_allocbt_init_key_from_rec(
209	union xfs_btree_key	*key,
210	union xfs_btree_rec	*rec)
211{
212	ASSERT(rec->alloc.ar_startblock != 0);
213
214	key->alloc.ar_startblock = rec->alloc.ar_startblock;
215	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
216}
217
218STATIC void
219xfs_allocbt_init_rec_from_key(
220	union xfs_btree_key	*key,
221	union xfs_btree_rec	*rec)
222{
223	ASSERT(key->alloc.ar_startblock != 0);
224
225	rec->alloc.ar_startblock = key->alloc.ar_startblock;
226	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
227}
228
229STATIC void
230xfs_allocbt_init_rec_from_cur(
231	struct xfs_btree_cur	*cur,
232	union xfs_btree_rec	*rec)
233{
234	ASSERT(cur->bc_rec.a.ar_startblock != 0);
235
236	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
237	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
238}
239
/*
 * Point @ptr at the root block of this cursor's btree, as recorded in
 * the AGF (agf_roots is indexed by btree type).
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* The cursor must refer to the same AG as the AGF buffer. */
	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	/* Both sides are big-endian short-form pointers; no conversion. */
	ptr->s = agf->agf_roots[cur->bc_btnum];
}
252
253STATIC __int64_t
254xfs_allocbt_key_diff(
255	struct xfs_btree_cur	*cur,
256	union xfs_btree_key	*key)
257{
258	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
259	xfs_alloc_key_t		*kp = &key->alloc;
260	__int64_t		diff;
261
262	if (cur->bc_btnum == XFS_BTNUM_BNO) {
263		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
264				rec->ar_startblock;
265	}
266
267	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
268	if (diff)
269		return diff;
270
271	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
272}
273
/*
 * Structural verifier shared by the read and write verifiers: returns
 * true iff the buffer looks like a plausible free space btree block.
 */
static bool
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	level = be16_to_cpu(block->bb_level);
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
		/* v5 bnobt block: also check uuid, location and owner. */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTB_MAGIC):
		/* bnobt level check, shared by v4 and v5 blocks. */
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
		/* v5 cntbt block: also check uuid, location and owner. */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTC_MAGIC):
		/* cntbt level check, shared by v4 and v5 blocks. */
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	default:
		return false;
	}

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.s.bb_leftsib ||
	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
		return false;
	if (!block->bb_u.s.bb_rightsib ||
	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
		return false;

	return true;
}
353
/*
 * Read verifier: check the CRC first (a bad CRC means nothing else in
 * the block can be trusted), then the structural checks.  Any failure
 * is flagged on the buffer and traced.
 */
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, EFSBADCRC);
	else if (!xfs_allocbt_verify(bp))
		xfs_buf_ioerror(bp, EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}
368
/*
 * Write verifier: refuse to write structurally-bad blocks to disk,
 * then stamp the block CRC (a no-op on pre-CRC filesystems).
 */
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_allocbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}
382
/* Buffer verifier operations for free space btree blocks. */
const struct xfs_buf_ops xfs_allocbt_buf_ops = {
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
};
387
388
389#if defined(DEBUG) || defined(XFS_WARN)
390STATIC int
391xfs_allocbt_keys_inorder(
392	struct xfs_btree_cur	*cur,
393	union xfs_btree_key	*k1,
394	union xfs_btree_key	*k2)
395{
396	if (cur->bc_btnum == XFS_BTNUM_BNO) {
397		return be32_to_cpu(k1->alloc.ar_startblock) <
398		       be32_to_cpu(k2->alloc.ar_startblock);
399	} else {
400		return be32_to_cpu(k1->alloc.ar_blockcount) <
401			be32_to_cpu(k2->alloc.ar_blockcount) ||
402			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
403			 be32_to_cpu(k1->alloc.ar_startblock) <
404			 be32_to_cpu(k2->alloc.ar_startblock));
405	}
406}
407
408STATIC int
409xfs_allocbt_recs_inorder(
410	struct xfs_btree_cur	*cur,
411	union xfs_btree_rec	*r1,
412	union xfs_btree_rec	*r2)
413{
414	if (cur->bc_btnum == XFS_BTNUM_BNO) {
415		return be32_to_cpu(r1->alloc.ar_startblock) +
416			be32_to_cpu(r1->alloc.ar_blockcount) <=
417			be32_to_cpu(r2->alloc.ar_startblock);
418	} else {
419		return be32_to_cpu(r1->alloc.ar_blockcount) <
420			be32_to_cpu(r2->alloc.ar_blockcount) ||
421			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
422			 be32_to_cpu(r1->alloc.ar_startblock) <
423			 be32_to_cpu(r2->alloc.ar_startblock));
424	}
425}
426#endif	/* DEBUG */
427
/*
 * Operations vector shared by the by-bno and by-size free space
 * btrees; bc_btnum in the cursor distinguishes the two where needed.
 */
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
	.buf_ops		= &xfs_allocbt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};
450
/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		/* cntbt's last record tracks the AG's longest free extent */
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	/* v5 filesystems carry CRCs in every btree block. */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
490
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	/* Space remaining after the block header. */
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	/* Interior blocks hold key/pointer pairs. */
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}
/* v3.5.6 */
  1/*
  2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_types.h"
 21#include "xfs_log.h"
 22#include "xfs_trans.h"
 
 23#include "xfs_sb.h"
 24#include "xfs_ag.h"
 25#include "xfs_mount.h"
 26#include "xfs_bmap_btree.h"
 27#include "xfs_alloc_btree.h"
 28#include "xfs_ialloc_btree.h"
 29#include "xfs_dinode.h"
 30#include "xfs_inode.h"
 31#include "xfs_btree.h"
 32#include "xfs_alloc.h"
 33#include "xfs_extent_busy.h"
 34#include "xfs_error.h"
 35#include "xfs_trace.h"
 
 
 36
 37
/*
 * Duplicate an allocation btree cursor: create a fresh cursor over the
 * same mount, transaction, AGF buffer, AG and btree type as @cur.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}
 46
/*
 * Record a new btree root block in the AGF and adjust the tree height
 * by @inc (+1 when the root splits, -1 when it is joined away).  Both
 * the on-disk AGF and the in-core perag copy are updated, then the
 * changed AGF fields are logged.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	/* Block 0 holds the AG headers; a root there would be corruption. */
	ASSERT(ptr->s != 0);

	/* ptr->s is already big-endian, so it is stored without conversion. */
	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
 68
/*
 * Allocate a new block for the btree from this AG's freelist.
 *
 * @start and @length are unused here: allocbt blocks are always taken
 * one at a time from the AGFL.  On success *stat is 1 and new->s holds
 * the new AG block number (big-endian); if the freelist is empty,
 * *stat is 0 and 0 is returned.  Returns an error code on failure.
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		/* Empty freelist: report "no block available", not an error. */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Clear any busy-extent state on the block before reusing it. */
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
105
/*
 * Free a btree block: return it to the AG freelist and mark the extent
 * busy so it cannot be reallocated before this transaction commits.
 */
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	/*
	 * SKIP_DISCARD: presumably freelist blocks are reused quickly
	 * enough that issuing a discard for them is not worthwhile.
	 */
	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);


	/*
	 * NOTE(review): unlike the later (v3.15) version of this file, @bp
	 * is not invalidated here — presumably a caller handles the buffer
	 * in this release; confirm against the generic btree code.
	 */
	return 0;
}
126
/*
 * Update the longest extent in the AGF
 *
 * The cntbt is sorted by extent length, so the last record of the last
 * leaf block is the longest free extent in the AG.  Whenever that
 * record changes, mirror its length into the on-disk agf_longest and
 * the in-core pagf_longest, and log the AGF.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,		/* 1-based index of rec in block */
	int			reason)		/* LASTREC_* event code */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;		/* new longest, on-disk endian */
	int			numrecs;

	/* Only the by-size btree tracks the longest extent. */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* An inserted record only matters if it beats the current longest. */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/* Only deletion of the record past the new end matters. */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			/* New longest is the last remaining record. */
			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	/* len is big-endian, matching the on-disk agf_longest field. */
	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
189
190STATIC int
191xfs_allocbt_get_minrecs(
192	struct xfs_btree_cur	*cur,
193	int			level)
194{
195	return cur->bc_mp->m_alloc_mnr[level != 0];
196}
197
198STATIC int
199xfs_allocbt_get_maxrecs(
200	struct xfs_btree_cur	*cur,
201	int			level)
202{
203	return cur->bc_mp->m_alloc_mxr[level != 0];
204}
205
206STATIC void
207xfs_allocbt_init_key_from_rec(
208	union xfs_btree_key	*key,
209	union xfs_btree_rec	*rec)
210{
211	ASSERT(rec->alloc.ar_startblock != 0);
212
213	key->alloc.ar_startblock = rec->alloc.ar_startblock;
214	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
215}
216
217STATIC void
218xfs_allocbt_init_rec_from_key(
219	union xfs_btree_key	*key,
220	union xfs_btree_rec	*rec)
221{
222	ASSERT(key->alloc.ar_startblock != 0);
223
224	rec->alloc.ar_startblock = key->alloc.ar_startblock;
225	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
226}
227
228STATIC void
229xfs_allocbt_init_rec_from_cur(
230	struct xfs_btree_cur	*cur,
231	union xfs_btree_rec	*rec)
232{
233	ASSERT(cur->bc_rec.a.ar_startblock != 0);
234
235	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
236	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
237}
238
/*
 * Point @ptr at the root block of this cursor's btree, as recorded in
 * the AGF (agf_roots is indexed by btree type).
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* The cursor must refer to the same AG as the AGF buffer. */
	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	/* Both sides are big-endian short-form pointers; no conversion. */
	ptr->s = agf->agf_roots[cur->bc_btnum];
}
251
252STATIC __int64_t
253xfs_allocbt_key_diff(
254	struct xfs_btree_cur	*cur,
255	union xfs_btree_key	*key)
256{
257	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
258	xfs_alloc_key_t		*kp = &key->alloc;
259	__int64_t		diff;
260
261	if (cur->bc_btnum == XFS_BTNUM_BNO) {
262		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
263				rec->ar_startblock;
264	}
265
266	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
267	if (diff)
268		return diff;
269
270	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
271}
272
273#ifdef DEBUG
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
274STATIC int
275xfs_allocbt_keys_inorder(
276	struct xfs_btree_cur	*cur,
277	union xfs_btree_key	*k1,
278	union xfs_btree_key	*k2)
279{
280	if (cur->bc_btnum == XFS_BTNUM_BNO) {
281		return be32_to_cpu(k1->alloc.ar_startblock) <
282		       be32_to_cpu(k2->alloc.ar_startblock);
283	} else {
284		return be32_to_cpu(k1->alloc.ar_blockcount) <
285			be32_to_cpu(k2->alloc.ar_blockcount) ||
286			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
287			 be32_to_cpu(k1->alloc.ar_startblock) <
288			 be32_to_cpu(k2->alloc.ar_startblock));
289	}
290}
291
292STATIC int
293xfs_allocbt_recs_inorder(
294	struct xfs_btree_cur	*cur,
295	union xfs_btree_rec	*r1,
296	union xfs_btree_rec	*r2)
297{
298	if (cur->bc_btnum == XFS_BTNUM_BNO) {
299		return be32_to_cpu(r1->alloc.ar_startblock) +
300			be32_to_cpu(r1->alloc.ar_blockcount) <=
301			be32_to_cpu(r2->alloc.ar_startblock);
302	} else {
303		return be32_to_cpu(r1->alloc.ar_blockcount) <
304			be32_to_cpu(r2->alloc.ar_blockcount) ||
305			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
306			 be32_to_cpu(r1->alloc.ar_startblock) <
307			 be32_to_cpu(r2->alloc.ar_startblock));
308	}
309}
310#endif	/* DEBUG */
311
/*
 * Operations vector shared by the by-bno and by-size free space
 * btrees; bc_btnum in the cursor distinguishes the two where needed.
 */
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
#ifdef DEBUG

	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};
333
/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		/* cntbt's last record tracks the AG's longest free extent */
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;




	return cur;
}
370
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	/* Space remaining after the block header. */
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	/* Interior blocks hold key/pointer pairs. */
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}