  1/*
  2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_shared.h"
 21#include "xfs_format.h"
 22#include "xfs_log_format.h"
 23#include "xfs_trans_resv.h"
 24#include "xfs_bit.h"
 25#include "xfs_sb.h"
 26#include "xfs_ag.h"
 27#include "xfs_mount.h"
 28#include "xfs_inode.h"
 29#include "xfs_trans.h"
 30#include "xfs_inode_item.h"
 31#include "xfs_alloc.h"
 32#include "xfs_btree.h"
 33#include "xfs_bmap_btree.h"
 34#include "xfs_bmap.h"
 35#include "xfs_error.h"
 36#include "xfs_quota.h"
 37#include "xfs_trace.h"
 38#include "xfs_cksum.h"
 39#include "xfs_dinode.h"
 40
 41/*
 42 * Determine the extent state.
 43 */
 44/* ARGSUSED */
 45STATIC xfs_exntst_t
 46xfs_extent_state(
 47	xfs_filblks_t		blks,
 48	int			extent_flag)
 49{
 50	if (extent_flag) {
 51		ASSERT(blks != 0);	/* saved for DMIG */
 52		return XFS_EXT_UNWRITTEN;
 53	}
 54	return XFS_EXT_NORM;
 55}
 56
 57/*
 58 * Convert on-disk form of btree root to in-memory form.
 59 */
 60void
 61xfs_bmdr_to_bmbt(
 62	struct xfs_inode	*ip,
 63	xfs_bmdr_block_t	*dblock,
 64	int			dblocklen,
 65	struct xfs_btree_block	*rblock,
 66	int			rblocklen)
 67{
 68	struct xfs_mount	*mp = ip->i_mount;
 69	int			dmxr;
 70	xfs_bmbt_key_t		*fkp;
 71	__be64			*fpp;
 72	xfs_bmbt_key_t		*tkp;
 73	__be64			*tpp;
 74
 75	if (xfs_sb_version_hascrc(&mp->m_sb))
 76		xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
 77				 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
 78				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
 79	else
 80		xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
 81				 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
 82				 XFS_BTREE_LONG_PTRS);
 83
 84	rblock->bb_level = dblock->bb_level;
 85	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
 86	rblock->bb_numrecs = dblock->bb_numrecs;
 87	dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
 88	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
 89	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
 90	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
 91	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
 92	dmxr = be16_to_cpu(dblock->bb_numrecs);
 93	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
 94	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
 95}
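
/*
 * Clarifying note: the on-disk root kept in the inode fork uses the tiny
 * xfs_bmdr_block_t header (just bb_level and bb_numrecs), while the
 * in-memory root above is given the full long-format struct xfs_btree_block
 * header, including sibling pointers and, on v5 filesystems, the CRC
 * metadata.  The key and pointer payloads are identical in both forms; the
 * pointer array inside dblock is located with xfs_bmdr_maxrecs(mp,
 * dblocklen, 0) because its position depends on how many records the
 * smaller on-disk header leaves room for.
 */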
 96
 97/*
 98 * Convert a compressed bmap extent record to an uncompressed form.
 99 * This code must be in sync with the routines xfs_bmbt_get_startoff,
100 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
101 */
102STATIC void
103__xfs_bmbt_get_all(
104		__uint64_t l0,
105		__uint64_t l1,
106		xfs_bmbt_irec_t *s)
107{
108	int	ext_flag;
109	xfs_exntst_t st;
110
111	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
112	s->br_startoff = ((xfs_fileoff_t)l0 &
113			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
114#if XFS_BIG_BLKNOS
115	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
116			   (((xfs_fsblock_t)l1) >> 21);
117#else
118#ifdef DEBUG
119	{
120		xfs_dfsbno_t	b;
121
122		b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
123		    (((xfs_dfsbno_t)l1) >> 21);
124		ASSERT((b >> 32) == 0 || isnulldstartblock(b));
125		s->br_startblock = (xfs_fsblock_t)b;
126	}
127#else	/* !DEBUG */
128	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
129#endif	/* DEBUG */
130#endif	/* XFS_BIG_BLKNOS */
131	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
132	/* This is xfs_extent_state() in-line */
133	if (ext_flag) {
134		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
135		st = XFS_EXT_UNWRITTEN;
136	} else
137		st = XFS_EXT_NORM;
138	s->br_state = st;
139}
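
/*
 * Layout note with a hypothetical helper (illustration only): an in-core
 * extent record packs four fields into the 128 bits of l0/l1, matching the
 * shifts above:
 *
 *	l0 bit  63	extent flag (1 = unwritten)
 *	l0 bits 62..9	br_startoff (54 bits)
 *	l0 bits 8..0	br_startblock, high 9 bits
 *	l1 bits 63..21	br_startblock, low 43 bits (52 bits in total)
 *	l1 bits 20..0	br_blockcount (21 bits)
 *
 * The helper below hand-packs a record and decodes it with
 * __xfs_bmbt_get_all() purely to make that layout concrete; it is not used
 * by anything in this file.
 */
static inline void
xfs_bmbt_example_decode(void)
{
	xfs_bmbt_irec_t	irec;
	__uint64_t	l0, l1;

	/* startoff 8, startblock 1000, blockcount 16, normal (written) */
	l0 = ((__uint64_t)8 << 9) | ((__uint64_t)1000 >> 43);
	l1 = ((__uint64_t)1000 << 21) | 16;

	__xfs_bmbt_get_all(l0, l1, &irec);
	ASSERT(irec.br_startoff == 8);
	ASSERT(irec.br_startblock == 1000);
	ASSERT(irec.br_blockcount == 16);
	ASSERT(irec.br_state == XFS_EXT_NORM);
}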
140
141void
142xfs_bmbt_get_all(
143	xfs_bmbt_rec_host_t *r,
144	xfs_bmbt_irec_t *s)
145{
146	__xfs_bmbt_get_all(r->l0, r->l1, s);
147}
148
149/*
150 * Extract the blockcount field from an in memory bmap extent record.
151 */
152xfs_filblks_t
153xfs_bmbt_get_blockcount(
154	xfs_bmbt_rec_host_t	*r)
155{
156	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
157}
158
159/*
160 * Extract the startblock field from an in memory bmap extent record.
161 */
162xfs_fsblock_t
163xfs_bmbt_get_startblock(
164	xfs_bmbt_rec_host_t	*r)
165{
166#if XFS_BIG_BLKNOS
167	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
168	       (((xfs_fsblock_t)r->l1) >> 21);
169#else
170#ifdef DEBUG
171	xfs_dfsbno_t	b;
172
173	b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
174	    (((xfs_dfsbno_t)r->l1) >> 21);
175	ASSERT((b >> 32) == 0 || isnulldstartblock(b));
176	return (xfs_fsblock_t)b;
177#else	/* !DEBUG */
178	return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
179#endif	/* DEBUG */
180#endif	/* XFS_BIG_BLKNOS */
181}
182
183/*
184 * Extract the startoff field from an in memory bmap extent record.
185 */
186xfs_fileoff_t
187xfs_bmbt_get_startoff(
188	xfs_bmbt_rec_host_t	*r)
189{
190	return ((xfs_fileoff_t)r->l0 &
191		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
192}
193
194xfs_exntst_t
195xfs_bmbt_get_state(
196	xfs_bmbt_rec_host_t	*r)
197{
198	int	ext_flag;
199
200	ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
201	return xfs_extent_state(xfs_bmbt_get_blockcount(r),
202				ext_flag);
203}
204
205/*
206 * Extract the blockcount field from an on disk bmap extent record.
207 */
208xfs_filblks_t
209xfs_bmbt_disk_get_blockcount(
210	xfs_bmbt_rec_t	*r)
211{
212	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
213}
214
215/*
216 * Extract the startoff field from a disk format bmap extent record.
217 */
218xfs_fileoff_t
219xfs_bmbt_disk_get_startoff(
220	xfs_bmbt_rec_t	*r)
221{
222	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
223		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
224}
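
/*
 * Minimal sketch (hypothetical helper): the only difference between the
 * on-disk record (xfs_bmbt_rec_t, big-endian) and the in-core host record
 * (xfs_bmbt_rec_host_t, CPU byte order) is the byte order of l0 and l1, so
 * the disk accessors above are simply the host accessors with be64_to_cpu()
 * folded in.
 */
static inline void
xfs_bmbt_example_disk_to_host(
	xfs_bmbt_rec_t		*drec,	/* on-disk, big-endian */
	xfs_bmbt_rec_host_t	*hrec)	/* in-core, CPU byte order */
{
	hrec->l0 = be64_to_cpu(drec->l0);
	hrec->l1 = be64_to_cpu(drec->l1);
}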
225
226
227/*
228 * Set all the fields in a bmap extent record from the arguments.
229 */
230void
231xfs_bmbt_set_allf(
232	xfs_bmbt_rec_host_t	*r,
233	xfs_fileoff_t		startoff,
234	xfs_fsblock_t		startblock,
235	xfs_filblks_t		blockcount,
236	xfs_exntst_t		state)
237{
238	int		extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
239
240	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
241	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
242	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
243
244#if XFS_BIG_BLKNOS
245	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
246
247	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
248		((xfs_bmbt_rec_base_t)startoff << 9) |
249		((xfs_bmbt_rec_base_t)startblock >> 43);
250	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
251		((xfs_bmbt_rec_base_t)blockcount &
252		(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
253#else	/* !XFS_BIG_BLKNOS */
254	if (isnullstartblock(startblock)) {
255		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
256			((xfs_bmbt_rec_base_t)startoff << 9) |
257			 (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
258		r->l1 = xfs_mask64hi(11) |
259			  ((xfs_bmbt_rec_base_t)startblock << 21) |
260			  ((xfs_bmbt_rec_base_t)blockcount &
261			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
262	} else {
263		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
264			((xfs_bmbt_rec_base_t)startoff << 9);
265		r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
266			 ((xfs_bmbt_rec_base_t)blockcount &
267			 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
268	}
269#endif	/* XFS_BIG_BLKNOS */
270}
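
/*
 * Usage sketch (hypothetical values): packing an extent with
 * xfs_bmbt_set_allf() and unpacking it with xfs_bmbt_get_all() is lossless
 * for any in-range startoff/startblock/blockcount, including the unwritten
 * state carried in the top bit.
 */
static inline void
xfs_bmbt_example_roundtrip(void)
{
	xfs_bmbt_rec_host_t	rec;
	xfs_bmbt_irec_t		irec;

	xfs_bmbt_set_allf(&rec, 8, 1000, 16, XFS_EXT_UNWRITTEN);
	xfs_bmbt_get_all(&rec, &irec);

	ASSERT(irec.br_startoff == 8);
	ASSERT(irec.br_startblock == 1000);
	ASSERT(irec.br_blockcount == 16);
	ASSERT(irec.br_state == XFS_EXT_UNWRITTEN);
}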
271
272/*
273 * Set all the fields in a bmap extent record from the uncompressed form.
274 */
275void
276xfs_bmbt_set_all(
277	xfs_bmbt_rec_host_t *r,
278	xfs_bmbt_irec_t	*s)
279{
280	xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
281			     s->br_blockcount, s->br_state);
282}
283
284
285/*
286 * Set all the fields in a disk format bmap extent record from the arguments.
287 */
288void
289xfs_bmbt_disk_set_allf(
290	xfs_bmbt_rec_t		*r,
291	xfs_fileoff_t		startoff,
292	xfs_fsblock_t		startblock,
293	xfs_filblks_t		blockcount,
294	xfs_exntst_t		state)
295{
296	int			extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
297
298	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
299	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
300	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
301
302#if XFS_BIG_BLKNOS
303	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
304
305	r->l0 = cpu_to_be64(
306		((xfs_bmbt_rec_base_t)extent_flag << 63) |
307		 ((xfs_bmbt_rec_base_t)startoff << 9) |
308		 ((xfs_bmbt_rec_base_t)startblock >> 43));
309	r->l1 = cpu_to_be64(
310		((xfs_bmbt_rec_base_t)startblock << 21) |
311		 ((xfs_bmbt_rec_base_t)blockcount &
312		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
313#else	/* !XFS_BIG_BLKNOS */
314	if (isnullstartblock(startblock)) {
315		r->l0 = cpu_to_be64(
316			((xfs_bmbt_rec_base_t)extent_flag << 63) |
317			 ((xfs_bmbt_rec_base_t)startoff << 9) |
318			  (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
319		r->l1 = cpu_to_be64(xfs_mask64hi(11) |
320			  ((xfs_bmbt_rec_base_t)startblock << 21) |
321			  ((xfs_bmbt_rec_base_t)blockcount &
322			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
323	} else {
324		r->l0 = cpu_to_be64(
325			((xfs_bmbt_rec_base_t)extent_flag << 63) |
326			 ((xfs_bmbt_rec_base_t)startoff << 9));
327		r->l1 = cpu_to_be64(
328			((xfs_bmbt_rec_base_t)startblock << 21) |
329			 ((xfs_bmbt_rec_base_t)blockcount &
330			  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
331	}
332#endif	/* XFS_BIG_BLKNOS */
333}
334
335/*
 336 * Set all the fields in a disk format bmap extent record from the uncompressed form.
337 */
338STATIC void
339xfs_bmbt_disk_set_all(
340	xfs_bmbt_rec_t	*r,
341	xfs_bmbt_irec_t *s)
342{
343	xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
344				  s->br_blockcount, s->br_state);
345}
346
347/*
348 * Set the blockcount field in a bmap extent record.
349 */
350void
351xfs_bmbt_set_blockcount(
352	xfs_bmbt_rec_host_t *r,
353	xfs_filblks_t	v)
354{
355	ASSERT((v & xfs_mask64hi(43)) == 0);
356	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
357		  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
358}
359
360/*
361 * Set the startblock field in a bmap extent record.
362 */
363void
364xfs_bmbt_set_startblock(
365	xfs_bmbt_rec_host_t *r,
366	xfs_fsblock_t	v)
367{
368#if XFS_BIG_BLKNOS
369	ASSERT((v & xfs_mask64hi(12)) == 0);
370	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
371		  (xfs_bmbt_rec_base_t)(v >> 43);
372	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
373		  (xfs_bmbt_rec_base_t)(v << 21);
374#else	/* !XFS_BIG_BLKNOS */
375	if (isnullstartblock(v)) {
376		r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
377		r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
378			  ((xfs_bmbt_rec_base_t)v << 21) |
379			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
380	} else {
381		r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
382		r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
383			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
384	}
385#endif	/* XFS_BIG_BLKNOS */
386}
387
388/*
389 * Set the startoff field in a bmap extent record.
390 */
391void
392xfs_bmbt_set_startoff(
393	xfs_bmbt_rec_host_t *r,
394	xfs_fileoff_t	v)
395{
396	ASSERT((v & xfs_mask64hi(9)) == 0);
397	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
398		((xfs_bmbt_rec_base_t)v << 9) |
399		  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
400}
401
402/*
403 * Set the extent state field in a bmap extent record.
404 */
405void
406xfs_bmbt_set_state(
407	xfs_bmbt_rec_host_t *r,
408	xfs_exntst_t	v)
409{
410	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
411	if (v == XFS_EXT_NORM)
412		r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
413	else
414		r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
415}
416
417/*
418 * Convert in-memory form of btree root to on-disk form.
419 */
420void
421xfs_bmbt_to_bmdr(
422	struct xfs_mount	*mp,
423	struct xfs_btree_block	*rblock,
424	int			rblocklen,
425	xfs_bmdr_block_t	*dblock,
426	int			dblocklen)
427{
428	int			dmxr;
429	xfs_bmbt_key_t		*fkp;
430	__be64			*fpp;
431	xfs_bmbt_key_t		*tkp;
432	__be64			*tpp;
433
434	if (xfs_sb_version_hascrc(&mp->m_sb)) {
435		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
436		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid));
437		ASSERT(rblock->bb_u.l.bb_blkno ==
438		       cpu_to_be64(XFS_BUF_DADDR_NULL));
439	} else
440		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
441	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO));
442	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO));
443	ASSERT(rblock->bb_level != 0);
444	dblock->bb_level = rblock->bb_level;
445	dblock->bb_numrecs = rblock->bb_numrecs;
446	dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
447	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
448	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
449	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
450	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
451	dmxr = be16_to_cpu(dblock->bb_numrecs);
452	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
453	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
454}
455
456/*
457 * Check extent records, which have just been read, for
458 * any bit in the extent flag field. ASSERT on debug
459 * kernels, as this condition should not occur.
460 * Return an error condition (1) if any flags found,
461 * otherwise return 0.
462 */
463
464int
465xfs_check_nostate_extents(
466	xfs_ifork_t		*ifp,
467	xfs_extnum_t		idx,
468	xfs_extnum_t		num)
469{
470	for (; num > 0; num--, idx++) {
471		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
472		if ((ep->l0 >>
473		     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
474			ASSERT(0);
475			return 1;
476		}
477	}
478	return 0;
479}
480
481
482STATIC struct xfs_btree_cur *
483xfs_bmbt_dup_cursor(
484	struct xfs_btree_cur	*cur)
485{
486	struct xfs_btree_cur	*new;
487
488	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
489			cur->bc_private.b.ip, cur->bc_private.b.whichfork);
490
491	/*
492	 * Copy the firstblock, flist, and flags values,
493	 * since init cursor doesn't get them.
494	 */
495	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
496	new->bc_private.b.flist = cur->bc_private.b.flist;
497	new->bc_private.b.flags = cur->bc_private.b.flags;
498
499	return new;
500}
501
502STATIC void
503xfs_bmbt_update_cursor(
504	struct xfs_btree_cur	*src,
505	struct xfs_btree_cur	*dst)
506{
507	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
508	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
509	ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
510
511	dst->bc_private.b.allocated += src->bc_private.b.allocated;
512	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
513
514	src->bc_private.b.allocated = 0;
515}
516
517STATIC int
518xfs_bmbt_alloc_block(
519	struct xfs_btree_cur	*cur,
520	union xfs_btree_ptr	*start,
521	union xfs_btree_ptr	*new,
522	int			length,
523	int			*stat)
524{
525	xfs_alloc_arg_t		args;		/* block allocation args */
526	int			error;		/* error return value */
527
528	memset(&args, 0, sizeof(args));
529	args.tp = cur->bc_tp;
530	args.mp = cur->bc_mp;
531	args.fsbno = cur->bc_private.b.firstblock;
532	args.firstblock = args.fsbno;
533
534	if (args.fsbno == NULLFSBLOCK) {
535		args.fsbno = be64_to_cpu(start->l);
536		args.type = XFS_ALLOCTYPE_START_BNO;
537		/*
538		 * Make sure there is sufficient room left in the AG to
539		 * complete a full tree split for an extent insert.  If
540		 * we are converting the middle part of an extent then
541		 * we may need space for two tree splits.
542		 *
543		 * We are relying on the caller to make the correct block
544		 * reservation for this operation to succeed.  If the
545		 * reservation amount is insufficient then we may fail a
546		 * block allocation here and corrupt the filesystem.
547		 */
548		args.minleft = xfs_trans_get_block_res(args.tp);
549	} else if (cur->bc_private.b.flist->xbf_low) {
550		args.type = XFS_ALLOCTYPE_START_BNO;
551	} else {
552		args.type = XFS_ALLOCTYPE_NEAR_BNO;
553	}
554
555	args.minlen = args.maxlen = args.prod = 1;
556	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
557	if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
558		error = XFS_ERROR(ENOSPC);
559		goto error0;
560	}
561	error = xfs_alloc_vextent(&args);
562	if (error)
563		goto error0;
564
565	if (args.fsbno == NULLFSBLOCK && args.minleft) {
566		/*
567		 * Could not find an AG with enough free space to satisfy
568		 * a full btree split.  Try again without minleft and if
569		 * successful activate the lowspace algorithm.
570		 */
571		args.fsbno = 0;
572		args.type = XFS_ALLOCTYPE_FIRST_AG;
573		args.minleft = 0;
574		error = xfs_alloc_vextent(&args);
575		if (error)
576			goto error0;
577		cur->bc_private.b.flist->xbf_low = 1;
578	}
579	if (args.fsbno == NULLFSBLOCK) {
580		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
581		*stat = 0;
582		return 0;
583	}
584	ASSERT(args.len == 1);
585	cur->bc_private.b.firstblock = args.fsbno;
586	cur->bc_private.b.allocated++;
587	cur->bc_private.b.ip->i_d.di_nblocks++;
588	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
589	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
590			XFS_TRANS_DQ_BCOUNT, 1L);
591
592	new->l = cpu_to_be64(args.fsbno);
593
594	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
595	*stat = 1;
596	return 0;
597
598 error0:
599	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
600	return error;
601}
602
603STATIC int
604xfs_bmbt_free_block(
605	struct xfs_btree_cur	*cur,
606	struct xfs_buf		*bp)
607{
608	struct xfs_mount	*mp = cur->bc_mp;
609	struct xfs_inode	*ip = cur->bc_private.b.ip;
610	struct xfs_trans	*tp = cur->bc_tp;
611	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
612
613	xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
614	ip->i_d.di_nblocks--;
615
616	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
617	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
618	xfs_trans_binval(tp, bp);
619	return 0;
620}
621
622STATIC int
623xfs_bmbt_get_minrecs(
624	struct xfs_btree_cur	*cur,
625	int			level)
626{
627	if (level == cur->bc_nlevels - 1) {
628		struct xfs_ifork	*ifp;
629
630		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
631				    cur->bc_private.b.whichfork);
632
633		return xfs_bmbt_maxrecs(cur->bc_mp,
634					ifp->if_broot_bytes, level == 0) / 2;
635	}
636
637	return cur->bc_mp->m_bmap_dmnr[level != 0];
638}
639
640int
641xfs_bmbt_get_maxrecs(
642	struct xfs_btree_cur	*cur,
643	int			level)
644{
645	if (level == cur->bc_nlevels - 1) {
646		struct xfs_ifork	*ifp;
647
648		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
649				    cur->bc_private.b.whichfork);
650
651		return xfs_bmbt_maxrecs(cur->bc_mp,
652					ifp->if_broot_bytes, level == 0);
653	}
654
655	return cur->bc_mp->m_bmap_dmxr[level != 0];
656
657}
658
659/*
660 * Get the maximum records we could store in the on-disk format.
661 *
662 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
663 * for the root node this checks the available space in the dinode fork
664 * so that we can resize the in-memory buffer to match it.  After a
665 * resize to the maximum size this function returns the same value
666 * as xfs_bmbt_get_maxrecs for the root node, too.
667 */
668STATIC int
669xfs_bmbt_get_dmaxrecs(
670	struct xfs_btree_cur	*cur,
671	int			level)
672{
673	if (level != cur->bc_nlevels - 1)
674		return cur->bc_mp->m_bmap_dmxr[level != 0];
675	return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
676				level == 0);
677}
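
/*
 * Clarifying note: at the root level the two size callbacks deliberately
 * answer different questions.  xfs_bmbt_get_maxrecs() reports the capacity
 * of the current in-memory root allocation (ifp->if_broot_bytes), which
 * grows and shrinks as records come and go; xfs_bmbt_get_dmaxrecs() reports
 * the capacity of the full space the fork has in the on-disk inode
 * (bc_private.b.forksize, set from XFS_IFORK_SIZE() when the cursor is
 * built), i.e. the largest the root can get before the tree has to grow
 * another level.
 */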
678
679STATIC void
680xfs_bmbt_init_key_from_rec(
681	union xfs_btree_key	*key,
682	union xfs_btree_rec	*rec)
683{
684	key->bmbt.br_startoff =
685		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
686}
687
688STATIC void
689xfs_bmbt_init_rec_from_key(
690	union xfs_btree_key	*key,
691	union xfs_btree_rec	*rec)
692{
693	ASSERT(key->bmbt.br_startoff != 0);
694
695	xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
696			       0, 0, XFS_EXT_NORM);
697}
698
699STATIC void
700xfs_bmbt_init_rec_from_cur(
701	struct xfs_btree_cur	*cur,
702	union xfs_btree_rec	*rec)
703{
704	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
705}
706
707STATIC void
708xfs_bmbt_init_ptr_from_cur(
709	struct xfs_btree_cur	*cur,
710	union xfs_btree_ptr	*ptr)
711{
712	ptr->l = 0;
713}
714
715STATIC __int64_t
716xfs_bmbt_key_diff(
717	struct xfs_btree_cur	*cur,
718	union xfs_btree_key	*key)
719{
720	return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
721				      cur->bc_rec.b.br_startoff;
722}
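
/*
 * Worked example: if the cursor is positioned to search for br_startoff 100,
 * a key holding startoff 96 yields -4 (the key sorts before the search
 * record), 100 yields 0 (exact match) and 112 yields +12 (the key sorts
 * after it); this is the usual negative/zero/positive comparator contract
 * the generic btree lookup code expects.
 */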
723
724static bool
725xfs_bmbt_verify(
726	struct xfs_buf		*bp)
727{
728	struct xfs_mount	*mp = bp->b_target->bt_mount;
729	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
730	unsigned int		level;
731
732	switch (block->bb_magic) {
733	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
734		if (!xfs_sb_version_hascrc(&mp->m_sb))
735			return false;
736		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid))
737			return false;
738		if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
739			return false;
740		/*
741		 * XXX: need a better way of verifying the owner here. Right now
742		 * just make sure there has been one set.
743		 */
744		if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
745			return false;
746		/* fall through */
747	case cpu_to_be32(XFS_BMAP_MAGIC):
748		break;
749	default:
750		return false;
751	}
752
753	/*
754	 * numrecs and level verification.
755	 *
756	 * We don't know what fork we belong to, so just verify that the level
757	 * is less than the maximum of the two. Later checks will be more
758	 * precise.
759	 */
760	level = be16_to_cpu(block->bb_level);
761	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
762		return false;
763	if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
764		return false;
765
766	/* sibling pointer verification */
767	if (!block->bb_u.l.bb_leftsib ||
768	    (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLDFSBNO) &&
769	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
770		return false;
771	if (!block->bb_u.l.bb_rightsib ||
772	    (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLDFSBNO) &&
773	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
774		return false;
775
776	return true;
777}
778
779static void
780xfs_bmbt_read_verify(
781	struct xfs_buf	*bp)
782{
783	if (!xfs_btree_lblock_verify_crc(bp))
784		xfs_buf_ioerror(bp, EFSBADCRC);
785	else if (!xfs_bmbt_verify(bp))
786		xfs_buf_ioerror(bp, EFSCORRUPTED);
787
788	if (bp->b_error) {
789		trace_xfs_btree_corrupt(bp, _RET_IP_);
790		xfs_verifier_error(bp);
791	}
792}
793
794static void
795xfs_bmbt_write_verify(
796	struct xfs_buf	*bp)
797{
798	if (!xfs_bmbt_verify(bp)) {
799		trace_xfs_btree_corrupt(bp, _RET_IP_);
800		xfs_buf_ioerror(bp, EFSCORRUPTED);
801		xfs_verifier_error(bp);
802		return;
803	}
804	xfs_btree_lblock_calc_crc(bp);
805}
806
807const struct xfs_buf_ops xfs_bmbt_buf_ops = {
808	.verify_read = xfs_bmbt_read_verify,
809	.verify_write = xfs_bmbt_write_verify,
810};
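
/*
 * Note on verifier ordering: on read the CRC is checked before the
 * structural checks in xfs_bmbt_verify(), so damage from a torn or partial
 * write is reported as EFSBADCRC rather than EFSCORRUPTED; on write the
 * structural checks run first and xfs_btree_lblock_calc_crc() runs last, so
 * the checksum covers the block's final contents.
 */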
811
812
813#if defined(DEBUG) || defined(XFS_WARN)
814STATIC int
815xfs_bmbt_keys_inorder(
816	struct xfs_btree_cur	*cur,
817	union xfs_btree_key	*k1,
818	union xfs_btree_key	*k2)
819{
820	return be64_to_cpu(k1->bmbt.br_startoff) <
821		be64_to_cpu(k2->bmbt.br_startoff);
822}
823
824STATIC int
825xfs_bmbt_recs_inorder(
826	struct xfs_btree_cur	*cur,
827	union xfs_btree_rec	*r1,
828	union xfs_btree_rec	*r2)
829{
830	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
831		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
832		xfs_bmbt_disk_get_startoff(&r2->bmbt);
833}
 834#endif	/* DEBUG || XFS_WARN */
835
836static const struct xfs_btree_ops xfs_bmbt_ops = {
837	.rec_len		= sizeof(xfs_bmbt_rec_t),
838	.key_len		= sizeof(xfs_bmbt_key_t),
839
840	.dup_cursor		= xfs_bmbt_dup_cursor,
841	.update_cursor		= xfs_bmbt_update_cursor,
842	.alloc_block		= xfs_bmbt_alloc_block,
843	.free_block		= xfs_bmbt_free_block,
844	.get_maxrecs		= xfs_bmbt_get_maxrecs,
845	.get_minrecs		= xfs_bmbt_get_minrecs,
846	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
847	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
848	.init_rec_from_key	= xfs_bmbt_init_rec_from_key,
849	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
850	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
851	.key_diff		= xfs_bmbt_key_diff,
852	.buf_ops		= &xfs_bmbt_buf_ops,
853#if defined(DEBUG) || defined(XFS_WARN)
854	.keys_inorder		= xfs_bmbt_keys_inorder,
855	.recs_inorder		= xfs_bmbt_recs_inorder,
856#endif
857};
858
859/*
860 * Allocate a new bmap btree cursor.
861 */
862struct xfs_btree_cur *				/* new bmap btree cursor */
863xfs_bmbt_init_cursor(
864	struct xfs_mount	*mp,		/* file system mount point */
865	struct xfs_trans	*tp,		/* transaction pointer */
866	struct xfs_inode	*ip,		/* inode owning the btree */
867	int			whichfork)	/* data or attr fork */
868{
869	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
870	struct xfs_btree_cur	*cur;
871
872	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
873
874	cur->bc_tp = tp;
875	cur->bc_mp = mp;
876	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
877	cur->bc_btnum = XFS_BTNUM_BMAP;
878	cur->bc_blocklog = mp->m_sb.sb_blocklog;
879
880	cur->bc_ops = &xfs_bmbt_ops;
881	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
882	if (xfs_sb_version_hascrc(&mp->m_sb))
883		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
884
885	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
886	cur->bc_private.b.ip = ip;
887	cur->bc_private.b.firstblock = NULLFSBLOCK;
888	cur->bc_private.b.flist = NULL;
889	cur->bc_private.b.allocated = 0;
890	cur->bc_private.b.flags = 0;
891	cur->bc_private.b.whichfork = whichfork;
892
893	return cur;
894}
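
/*
 * Usage sketch (hypothetical caller, modelled on the lookup helpers in
 * xfs_bmap.c): a cursor is built per operation, the search key goes into
 * cur->bc_rec.b, and the cursor must always be torn down with
 * xfs_btree_del_cursor().  Callers that may allocate or free btree blocks
 * are expected to fill in bc_private.b.firstblock and .flist themselves,
 * since they are left at NULLFSBLOCK/NULL here.
 */
static inline int
xfs_bmbt_example_lookup_le(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	int			*stat)	/* 1 if a record was found */
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, XFS_DATA_FORK);
	cur->bc_rec.b.br_startoff = bno;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}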
895
896/*
897 * Calculate number of records in a bmap btree block.
898 */
899int
900xfs_bmbt_maxrecs(
901	struct xfs_mount	*mp,
902	int			blocklen,
903	int			leaf)
904{
905	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
906
907	if (leaf)
908		return blocklen / sizeof(xfs_bmbt_rec_t);
909	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
910}
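
/*
 * Worked example (approximate, assuming 4096-byte blocks and the 24-byte
 * long-format block header used when CRCs are disabled):
 *
 *	leaf records:  (4096 - 24) / 16 = 254   (sizeof(xfs_bmbt_rec_t) == 16)
 *	node entries:  (4096 - 24) / 16 = 254   (8-byte key + 8-byte pointer)
 *
 * On a v5 (CRC-enabled) filesystem the larger block header lowers both
 * counts slightly.
 */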
911
912/*
913 * Calculate number of records in a bmap btree inode root.
914 */
915int
916xfs_bmdr_maxrecs(
917	struct xfs_mount	*mp,
918	int			blocklen,
919	int			leaf)
920{
921	blocklen -= sizeof(xfs_bmdr_block_t);
922
923	if (leaf)
924		return blocklen / sizeof(xfs_bmdr_rec_t);
925	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
926}
927
928/*
 929 * Change the owner of a btree format fork of the inode passed in. Change it to
 930 * the owner that is passed in so that we can change owners before or after
 931 * we switch forks between inodes. The operation that the caller is doing will
 932 * determine whether it needs to change owner before or after the switch.
933 *
934 * For demand paged transactional modification, the fork switch should be done
935 * after reading in all the blocks, modifying them and pinning them in the
936 * transaction. For modification when the buffers are already pinned in memory,
937 * the fork switch can be done before changing the owner as we won't need to
938 * validate the owner until the btree buffers are unpinned and writes can occur
939 * again.
940 *
941 * For recovery based ownership change, there is no transactional context and
942 * so a buffer list must be supplied so that we can record the buffers that we
943 * modified for the caller to issue IO on.
944 */
945int
946xfs_bmbt_change_owner(
947	struct xfs_trans	*tp,
948	struct xfs_inode	*ip,
949	int			whichfork,
950	xfs_ino_t		new_owner,
951	struct list_head	*buffer_list)
952{
953	struct xfs_btree_cur	*cur;
954	int			error;
955
956	ASSERT(tp || buffer_list);
957	ASSERT(!(tp && buffer_list));
958	if (whichfork == XFS_DATA_FORK)
959		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
960	else
961		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);
962
963	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
964	if (!cur)
965		return ENOMEM;
966
967	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
968	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
969	return error;
970}
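
/*
 * Hypothetical caller sketch: a transactional owner change passes a
 * transaction and no buffer list, while a log-recovery owner change passes
 * NULL for the transaction and supplies a buffer list so it can issue the
 * IO on the modified buffers itself afterwards.
 */
static inline int
xfs_bmbt_example_change_owner_in_recovery(
	struct xfs_inode	*ip,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	return xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, new_owner,
				     buffer_list);
}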