v4.10.11
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * Copyright (c) 2013 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_bit.h"
  26#include "xfs_mount.h"
  27#include "xfs_da_format.h"
  28#include "xfs_da_btree.h"
  29#include "xfs_dir2.h"
  30#include "xfs_dir2_priv.h"
  31#include "xfs_inode.h"
  32#include "xfs_trans.h"
  33#include "xfs_inode_item.h"
  34#include "xfs_alloc.h"
  35#include "xfs_bmap.h"
  36#include "xfs_attr.h"
  37#include "xfs_attr_leaf.h"
  38#include "xfs_error.h"
  39#include "xfs_trace.h"
  40#include "xfs_cksum.h"
  41#include "xfs_buf_item.h"
  42#include "xfs_log.h"
  43
  44/*
  45 * xfs_da_btree.c
  46 *
  47 * Routines to implement directories as Btrees of hashed names.
  48 */
  49
  50/*========================================================================
  51 * Function prototypes for the kernel.
  52 *========================================================================*/
  53
  54/*
  55 * Routines used for growing the Btree.
  56 */
  57STATIC int xfs_da3_root_split(xfs_da_state_t *state,
  58					    xfs_da_state_blk_t *existing_root,
  59					    xfs_da_state_blk_t *new_child);
  60STATIC int xfs_da3_node_split(xfs_da_state_t *state,
  61					    xfs_da_state_blk_t *existing_blk,
  62					    xfs_da_state_blk_t *split_blk,
  63					    xfs_da_state_blk_t *blk_to_add,
  64					    int treelevel,
  65					    int *result);
  66STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
  67					 xfs_da_state_blk_t *node_blk_1,
  68					 xfs_da_state_blk_t *node_blk_2);
  69STATIC void xfs_da3_node_add(xfs_da_state_t *state,
  70				   xfs_da_state_blk_t *old_node_blk,
  71				   xfs_da_state_blk_t *new_node_blk);
  72
  73/*
  74 * Routines used for shrinking the Btree.
  75 */
  76STATIC int xfs_da3_root_join(xfs_da_state_t *state,
  77					   xfs_da_state_blk_t *root_blk);
  78STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
  79STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
  80					      xfs_da_state_blk_t *drop_blk);
  81STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
  82					 xfs_da_state_blk_t *src_node_blk,
  83					 xfs_da_state_blk_t *dst_node_blk);
  84
  85/*
  86 * Utility routines.
  87 */
  88STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
  89				  xfs_da_state_blk_t *drop_blk,
  90				  xfs_da_state_blk_t *save_blk);
  91
  92
  93kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
  94
  95/*
  96 * Allocate a dir-state structure.
  97 * We don't put them on the stack since they're large.
  98 */
  99xfs_da_state_t *
 100xfs_da_state_alloc(void)
 101{
 102	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
 103}
 104
 105/*
 106 * Kill the altpath contents of a da-state structure.
 107 */
 108STATIC void
 109xfs_da_state_kill_altpath(xfs_da_state_t *state)
 110{
 111	int	i;
 112
 113	for (i = 0; i < state->altpath.active; i++)
 114		state->altpath.blk[i].bp = NULL;
 115	state->altpath.active = 0;
 116}
 117
 118/*
 119 * Free a da-state structure.
 120 */
 121void
 122xfs_da_state_free(xfs_da_state_t *state)
 123{
 124	xfs_da_state_kill_altpath(state);
 125#ifdef DEBUG
 126	memset((char *)state, 0, sizeof(*state));
 127#endif /* DEBUG */
 128	kmem_zone_free(xfs_da_state_zone, state);
 129}
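/*
 * Minimal usage sketch of the state lifecycle (the helper name below is
 * illustrative only, not taken from this file): a caller with a filled-out
 * xfs_da_args allocates a state, points it at the args and mount, runs a
 * node lookup, and frees the state when finished.  Error handling is
 * intentionally bare.
 */
STATIC int
xfs_da3_state_lookup_example(
	struct xfs_da_args	*args,
	int			*result)
{
	struct xfs_da_state	*state;
	int			error;

	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = args->dp->i_mount;

	error = xfs_da3_node_lookup_int(state, result);

	xfs_da_state_free(state);
	return error;
}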
 130
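/*
 * Verify the structural integrity of an in-core da btree node block: check
 * the magic number and, on v5 (CRC enabled) filesystems, the uuid, block
 * number and LSN in the extended header, then sanity check the tree level
 * and the entry count.
 */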
 131static bool
 132xfs_da3_node_verify(
 133	struct xfs_buf		*bp)
 134{
 135	struct xfs_mount	*mp = bp->b_target->bt_mount;
 136	struct xfs_da_intnode	*hdr = bp->b_addr;
 137	struct xfs_da3_icnode_hdr ichdr;
 138	const struct xfs_dir_ops *ops;
 139
 140	ops = xfs_dir_get_ops(mp, NULL);
 141
 142	ops->node_hdr_from_disk(&ichdr, hdr);
 143
 144	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 145		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 146
 147		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
 148			return false;
 149
 150		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
 151			return false;
 152		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
 153			return false;
 154		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
 155			return false;
 156	} else {
 157		if (ichdr.magic != XFS_DA_NODE_MAGIC)
 158			return false;
 159	}
 160	if (ichdr.level == 0)
 161		return false;
 162	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
 163		return false;
 164	if (ichdr.count == 0)
 165		return false;
 166
 167	/*
  168	 * we don't know if the node is for an attribute or directory tree,
 169	 * so only fail if the count is outside both bounds
 170	 */
 171	if (ichdr.count > mp->m_dir_geo->node_ents &&
 172	    ichdr.count > mp->m_attr_geo->node_ents)
 173		return false;
 174
 175	/* XXX: hash order check? */
 176
 177	return true;
 178}
 179
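/*
 * Write verifier: re-run the structural checks above and, on v5
 * filesystems, stamp the LSN of the attached buffer log item into the
 * header and recompute the block CRC before the buffer is written.
 */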
 180static void
 181xfs_da3_node_write_verify(
 182	struct xfs_buf	*bp)
 183{
 184	struct xfs_mount	*mp = bp->b_target->bt_mount;
 185	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 186	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 187
 188	if (!xfs_da3_node_verify(bp)) {
 189		xfs_buf_ioerror(bp, -EFSCORRUPTED);
 190		xfs_verifier_error(bp);
 191		return;
 192	}
 193
 194	if (!xfs_sb_version_hascrc(&mp->m_sb))
 195		return;
 196
 197	if (bip)
 198		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
 199
 200	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
 201}
 202
 203/*
 204 * leaf/node format detection on trees is sketchy, so a node read can be done on
 205 * leaf level blocks when detection identifies the tree as a node format tree
 206 * incorrectly. In this case, we need to swap the verifier to match the correct
 207 * format of the block being read.
 208 */
 209static void
 210xfs_da3_node_read_verify(
 211	struct xfs_buf		*bp)
 212{
 213	struct xfs_da_blkinfo	*info = bp->b_addr;
 214
 215	switch (be16_to_cpu(info->magic)) {
 216		case XFS_DA3_NODE_MAGIC:
 217			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
 218				xfs_buf_ioerror(bp, -EFSBADCRC);
 219				break;
 220			}
 221			/* fall through */
 222		case XFS_DA_NODE_MAGIC:
 223			if (!xfs_da3_node_verify(bp)) {
 224				xfs_buf_ioerror(bp, -EFSCORRUPTED);
 225				break;
 226			}
 227			return;
 228		case XFS_ATTR_LEAF_MAGIC:
 229		case XFS_ATTR3_LEAF_MAGIC:
 230			bp->b_ops = &xfs_attr3_leaf_buf_ops;
 231			bp->b_ops->verify_read(bp);
 232			return;
 233		case XFS_DIR2_LEAFN_MAGIC:
 234		case XFS_DIR3_LEAFN_MAGIC:
 235			bp->b_ops = &xfs_dir3_leafn_buf_ops;
 236			bp->b_ops->verify_read(bp);
 237			return;
 238		default:
 239			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 240			break;
 241	}
 242
 243	/* corrupt block */
 244	xfs_verifier_error(bp);
 245}
 246
 247const struct xfs_buf_ops xfs_da3_node_buf_ops = {
 248	.name = "xfs_da3_node",
 249	.verify_read = xfs_da3_node_read_verify,
 250	.verify_write = xfs_da3_node_write_verify,
 251};
 252
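/*
 * Read a da btree block (which the read verifier may have reclassified as
 * an attr or dir leaf) and tag the transaction buffer with the matching
 * log item type so that log recovery can replay it correctly.
 */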
 253int
 254xfs_da3_node_read(
 255	struct xfs_trans	*tp,
 256	struct xfs_inode	*dp,
 257	xfs_dablk_t		bno,
 258	xfs_daddr_t		mappedbno,
 259	struct xfs_buf		**bpp,
 260	int			which_fork)
 261{
 262	int			err;
 263
 264	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
 265					which_fork, &xfs_da3_node_buf_ops);
 266	if (!err && tp) {
 267		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
 268		int			type;
 269
 270		switch (be16_to_cpu(info->magic)) {
 271		case XFS_DA_NODE_MAGIC:
 272		case XFS_DA3_NODE_MAGIC:
 273			type = XFS_BLFT_DA_NODE_BUF;
 274			break;
 275		case XFS_ATTR_LEAF_MAGIC:
 276		case XFS_ATTR3_LEAF_MAGIC:
 277			type = XFS_BLFT_ATTR_LEAF_BUF;
 278			break;
 279		case XFS_DIR2_LEAFN_MAGIC:
 280		case XFS_DIR3_LEAFN_MAGIC:
 281			type = XFS_BLFT_DIR_LEAFN_BUF;
 282			break;
 283		default:
 284			type = 0;
 285			ASSERT(0);
 286			break;
 287		}
 288		xfs_trans_buf_set_type(tp, *bpp, type);
 289	}
 290	return err;
 291}
 292
 293/*========================================================================
 294 * Routines used for growing the Btree.
 295 *========================================================================*/
 296
 297/*
 298 * Create the initial contents of an intermediate node.
 299 */
 300int
 301xfs_da3_node_create(
 302	struct xfs_da_args	*args,
 303	xfs_dablk_t		blkno,
 304	int			level,
 305	struct xfs_buf		**bpp,
 306	int			whichfork)
 307{
 308	struct xfs_da_intnode	*node;
 309	struct xfs_trans	*tp = args->trans;
 310	struct xfs_mount	*mp = tp->t_mountp;
 311	struct xfs_da3_icnode_hdr ichdr = {0};
 312	struct xfs_buf		*bp;
 313	int			error;
 314	struct xfs_inode	*dp = args->dp;
 315
 316	trace_xfs_da_node_create(args);
 317	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
 318
 319	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 320	if (error)
 321		return error;
 322	bp->b_ops = &xfs_da3_node_buf_ops;
 323	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 324	node = bp->b_addr;
 325
 326	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 327		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 328
 329		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
 330		ichdr.magic = XFS_DA3_NODE_MAGIC;
 331		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
 332		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
 333		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
 334	} else {
 335		ichdr.magic = XFS_DA_NODE_MAGIC;
 336	}
 337	ichdr.level = level;
 338
 339	dp->d_ops->node_hdr_to_disk(node, &ichdr);
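	/*
	 * XFS_DA_LOGRANGE expands to the first and last byte offsets of the
	 * header relative to the start of the block, so only the header
	 * portion of the buffer is logged here.
	 */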
 340	xfs_trans_log_buf(tp, bp,
 341		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 342
 343	*bpp = bp;
 344	return 0;
 345}
 346
 347/*
 348 * Split a leaf node, rebalance, then possibly split
 349 * intermediate nodes, rebalance, etc.
 350 */
 351int							/* error */
 352xfs_da3_split(
 353	struct xfs_da_state	*state)
 354{
 355	struct xfs_da_state_blk	*oldblk;
 356	struct xfs_da_state_blk	*newblk;
 357	struct xfs_da_state_blk	*addblk;
 358	struct xfs_da_intnode	*node;
 359	int			max;
 360	int			action = 0;
 361	int			error;
 362	int			i;
 363
 364	trace_xfs_da_split(state->args);
 365
 366	/*
 367	 * Walk back up the tree splitting/inserting/adjusting as necessary.
 368	 * If we need to insert and there isn't room, split the node, then
 369	 * decide which fragment to insert the new block from below into.
 370	 * Note that we may split the root this way, but we need more fixup.
 371	 */
 372	max = state->path.active - 1;
 373	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
 374	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
 375	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
 376
 377	addblk = &state->path.blk[max];		/* initial dummy value */
 378	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
 379		oldblk = &state->path.blk[i];
 380		newblk = &state->altpath.blk[i];
 381
 382		/*
 383		 * If a leaf node then
 384		 *     Allocate a new leaf node, then rebalance across them.
 385		 * else if an intermediate node then
 386		 *     We split on the last layer, must we split the node?
 387		 */
 388		switch (oldblk->magic) {
 389		case XFS_ATTR_LEAF_MAGIC:
 390			error = xfs_attr3_leaf_split(state, oldblk, newblk);
 391			if ((error != 0) && (error != -ENOSPC)) {
 392				return error;	/* GROT: attr is inconsistent */
 393			}
 394			if (!error) {
 395				addblk = newblk;
 396				break;
 397			}
 398			/*
 399			 * Entry wouldn't fit, split the leaf again. The new
 400			 * extrablk will be consumed by xfs_da3_node_split if
 401			 * the node is split.
 402			 */
 403			state->extravalid = 1;
 404			if (state->inleaf) {
 405				state->extraafter = 0;	/* before newblk */
 406				trace_xfs_attr_leaf_split_before(state->args);
 407				error = xfs_attr3_leaf_split(state, oldblk,
 408							    &state->extrablk);
 409			} else {
 410				state->extraafter = 1;	/* after newblk */
 411				trace_xfs_attr_leaf_split_after(state->args);
 412				error = xfs_attr3_leaf_split(state, newblk,
 413							    &state->extrablk);
 414			}
 415			if (error)
 416				return error;	/* GROT: attr inconsistent */
 417			addblk = newblk;
 418			break;
 419		case XFS_DIR2_LEAFN_MAGIC:
 420			error = xfs_dir2_leafn_split(state, oldblk, newblk);
 421			if (error)
 422				return error;
 423			addblk = newblk;
 424			break;
 425		case XFS_DA_NODE_MAGIC:
 426			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
 427							 max - i, &action);
 428			addblk->bp = NULL;
 429			if (error)
 430				return error;	/* GROT: dir is inconsistent */
 431			/*
 432			 * Record the newly split block for the next time thru?
 433			 */
 434			if (action)
 435				addblk = newblk;
 436			else
 437				addblk = NULL;
 438			break;
 439		}
 440
 441		/*
 442		 * Update the btree to show the new hashval for this child.
 443		 */
 444		xfs_da3_fixhashpath(state, &state->path);
 445	}
 446	if (!addblk)
 447		return 0;
 448
 449	/*
 450	 * xfs_da3_node_split() should have consumed any extra blocks we added
 451	 * during a double leaf split in the attr fork. This is guaranteed as
 452	 * we can't be here if the attr fork only has a single leaf block.
 453	 */
 454	ASSERT(state->extravalid == 0 ||
 455	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
 456
 457	/*
 458	 * Split the root node.
 459	 */
 460	ASSERT(state->path.active == 0);
 461	oldblk = &state->path.blk[0];
 462	error = xfs_da3_root_split(state, oldblk, addblk);
 463	if (error) {
 464		addblk->bp = NULL;
 465		return error;	/* GROT: dir is inconsistent */
 466	}
 467
 468	/*
 469	 * Update pointers to the node which used to be block 0 and just got
 470	 * bumped because of the addition of a new root node.  Note that the
 471	 * original block 0 could be at any position in the list of blocks in
 472	 * the tree.
 473	 *
 474	 * Note: the magic numbers and sibling pointers are in the same physical
 475	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
 476	 * which version of the xfs_da_intnode structure we use here as the
 477	 * result will be the same using either structure.
 478	 */
 479	node = oldblk->bp->b_addr;
 480	if (node->hdr.info.forw) {
 481		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
 482		node = addblk->bp->b_addr;
 483		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 484		xfs_trans_log_buf(state->args->trans, addblk->bp,
 485				  XFS_DA_LOGRANGE(node, &node->hdr.info,
 486				  sizeof(node->hdr.info)));
 487	}
 488	node = oldblk->bp->b_addr;
 489	if (node->hdr.info.back) {
 490		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
 491		node = addblk->bp->b_addr;
 492		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 493		xfs_trans_log_buf(state->args->trans, addblk->bp,
 494				  XFS_DA_LOGRANGE(node, &node->hdr.info,
 495				  sizeof(node->hdr.info)));
 496	}
 497	addblk->bp = NULL;
 498	return 0;
 499}
 500
 501/*
 502 * Split the root.  We have to create a new root and point to the two
 503 * parts (the split old root) that we just created.  Copy block zero to
 504 * the EOF, extending the inode in process.
 505 */
 506STATIC int						/* error */
 507xfs_da3_root_split(
 508	struct xfs_da_state	*state,
 509	struct xfs_da_state_blk	*blk1,
 510	struct xfs_da_state_blk	*blk2)
 511{
 512	struct xfs_da_intnode	*node;
 513	struct xfs_da_intnode	*oldroot;
 514	struct xfs_da_node_entry *btree;
 515	struct xfs_da3_icnode_hdr nodehdr;
 516	struct xfs_da_args	*args;
 517	struct xfs_buf		*bp;
 518	struct xfs_inode	*dp;
 519	struct xfs_trans	*tp;
 520	struct xfs_dir2_leaf	*leaf;
 521	xfs_dablk_t		blkno;
 522	int			level;
 523	int			error;
 524	int			size;
 525
 526	trace_xfs_da_root_split(state->args);
 527
 528	/*
 529	 * Copy the existing (incorrect) block from the root node position
 530	 * to a free space somewhere.
 531	 */
 532	args = state->args;
 533	error = xfs_da_grow_inode(args, &blkno);
 534	if (error)
 535		return error;
 536
 537	dp = args->dp;
 538	tp = args->trans;
 539	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
 540	if (error)
 541		return error;
 542	node = bp->b_addr;
 543	oldroot = blk1->bp->b_addr;
 544	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 545	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
 546		struct xfs_da3_icnode_hdr icnodehdr;
 547
 548		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
 549		btree = dp->d_ops->node_tree_p(oldroot);
 550		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
 551		level = icnodehdr.level;
 552
 553		/*
 554		 * we are about to copy oldroot to bp, so set up the type
 555		 * of bp while we know exactly what it will be.
 556		 */
 557		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 558	} else {
 559		struct xfs_dir3_icleaf_hdr leafhdr;
 560		struct xfs_dir2_leaf_entry *ents;
 561
 562		leaf = (xfs_dir2_leaf_t *)oldroot;
 563		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 564		ents = dp->d_ops->leaf_ents_p(leaf);
 565
 566		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 567		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
 568		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
 569		level = 0;
 570
 571		/*
 572		 * we are about to copy oldroot to bp, so set up the type
 573		 * of bp while we know exactly what it will be.
 574		 */
 575		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
 576	}
 577
 578	/*
 579	 * we can copy most of the information in the node from one block to
 580	 * another, but for CRC enabled headers we have to make sure that the
 581	 * block specific identifiers are kept intact. We update the buffer
 582	 * directly for this.
 583	 */
 584	memcpy(node, oldroot, size);
 585	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
 586	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 587		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
 588
 589		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
 590	}
 591	xfs_trans_log_buf(tp, bp, 0, size - 1);
 592
 593	bp->b_ops = blk1->bp->b_ops;
 594	xfs_trans_buf_copy_type(bp, blk1->bp);
 595	blk1->bp = bp;
 596	blk1->blkno = blkno;
 597
 598	/*
 599	 * Set up the new root node.
 600	 */
 601	error = xfs_da3_node_create(args,
 602		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
 603		level + 1, &bp, args->whichfork);
 604	if (error)
 605		return error;
 606
 607	node = bp->b_addr;
 608	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 609	btree = dp->d_ops->node_tree_p(node);
 610	btree[0].hashval = cpu_to_be32(blk1->hashval);
 611	btree[0].before = cpu_to_be32(blk1->blkno);
 612	btree[1].hashval = cpu_to_be32(blk2->hashval);
 613	btree[1].before = cpu_to_be32(blk2->blkno);
 614	nodehdr.count = 2;
 615	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 616
 617#ifdef DEBUG
 618	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 619	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 620		ASSERT(blk1->blkno >= args->geo->leafblk &&
 621		       blk1->blkno < args->geo->freeblk);
 622		ASSERT(blk2->blkno >= args->geo->leafblk &&
 623		       blk2->blkno < args->geo->freeblk);
 624	}
 625#endif
 626
 627	/* Header is already logged by xfs_da_node_create */
 628	xfs_trans_log_buf(tp, bp,
 629		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
 630
 631	return 0;
 632}
 633
 634/*
 635 * Split the node, rebalance, then add the new entry.
 636 */
 637STATIC int						/* error */
 638xfs_da3_node_split(
 639	struct xfs_da_state	*state,
 640	struct xfs_da_state_blk	*oldblk,
 641	struct xfs_da_state_blk	*newblk,
 642	struct xfs_da_state_blk	*addblk,
 643	int			treelevel,
 644	int			*result)
 645{
 646	struct xfs_da_intnode	*node;
 647	struct xfs_da3_icnode_hdr nodehdr;
 648	xfs_dablk_t		blkno;
 649	int			newcount;
 650	int			error;
 651	int			useextra;
 652	struct xfs_inode	*dp = state->args->dp;
 653
 654	trace_xfs_da_node_split(state->args);
 655
 656	node = oldblk->bp->b_addr;
 657	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 658
 659	/*
  660	 * With V2 dirs the extra block is a data or freespace block, not a leaf, so only the attr fork ever has an extra entry to add here.
 661	 */
 662	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
 663	newcount = 1 + useextra;
 664	/*
 665	 * Do we have to split the node?
 666	 */
 667	if (nodehdr.count + newcount > state->args->geo->node_ents) {
 668		/*
 669		 * Allocate a new node, add to the doubly linked chain of
 670		 * nodes, then move some of our excess entries into it.
 671		 */
 672		error = xfs_da_grow_inode(state->args, &blkno);
 673		if (error)
 674			return error;	/* GROT: dir is inconsistent */
 675
 676		error = xfs_da3_node_create(state->args, blkno, treelevel,
 677					   &newblk->bp, state->args->whichfork);
 678		if (error)
 679			return error;	/* GROT: dir is inconsistent */
 680		newblk->blkno = blkno;
 681		newblk->magic = XFS_DA_NODE_MAGIC;
 682		xfs_da3_node_rebalance(state, oldblk, newblk);
 683		error = xfs_da3_blk_link(state, oldblk, newblk);
 684		if (error)
 685			return error;
 686		*result = 1;
 687	} else {
 688		*result = 0;
 689	}
 690
 691	/*
 692	 * Insert the new entry(s) into the correct block
 693	 * (updating last hashval in the process).
 694	 *
 695	 * xfs_da3_node_add() inserts BEFORE the given index,
 696	 * and as a result of using node_lookup_int() we always
 697	 * point to a valid entry (not after one), but a split
 698	 * operation always results in a new block whose hashvals
 699	 * FOLLOW the current block.
 700	 *
 701	 * If we had double-split op below us, then add the extra block too.
 702	 */
 703	node = oldblk->bp->b_addr;
 704	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 705	if (oldblk->index <= nodehdr.count) {
 706		oldblk->index++;
 707		xfs_da3_node_add(state, oldblk, addblk);
 708		if (useextra) {
 709			if (state->extraafter)
 710				oldblk->index++;
 711			xfs_da3_node_add(state, oldblk, &state->extrablk);
 712			state->extravalid = 0;
 713		}
 714	} else {
 715		newblk->index++;
 716		xfs_da3_node_add(state, newblk, addblk);
 717		if (useextra) {
 718			if (state->extraafter)
 719				newblk->index++;
 720			xfs_da3_node_add(state, newblk, &state->extrablk);
 721			state->extravalid = 0;
 722		}
 723	}
 724
 725	return 0;
 726}
 727
 728/*
 729 * Balance the btree elements between two intermediate nodes,
 730 * usually one full and one empty.
 731 *
 732 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 733 */
 734STATIC void
 735xfs_da3_node_rebalance(
 736	struct xfs_da_state	*state,
 737	struct xfs_da_state_blk	*blk1,
 738	struct xfs_da_state_blk	*blk2)
 739{
 740	struct xfs_da_intnode	*node1;
 741	struct xfs_da_intnode	*node2;
 742	struct xfs_da_intnode	*tmpnode;
 743	struct xfs_da_node_entry *btree1;
 744	struct xfs_da_node_entry *btree2;
 745	struct xfs_da_node_entry *btree_s;
 746	struct xfs_da_node_entry *btree_d;
 747	struct xfs_da3_icnode_hdr nodehdr1;
 748	struct xfs_da3_icnode_hdr nodehdr2;
 749	struct xfs_trans	*tp;
 750	int			count;
 751	int			tmp;
 752	int			swap = 0;
 753	struct xfs_inode	*dp = state->args->dp;
 754
 755	trace_xfs_da_node_rebalance(state->args);
 756
 757	node1 = blk1->bp->b_addr;
 758	node2 = blk2->bp->b_addr;
 759	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 760	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 761	btree1 = dp->d_ops->node_tree_p(node1);
 762	btree2 = dp->d_ops->node_tree_p(node2);
 763
 764	/*
 765	 * Figure out how many entries need to move, and in which direction.
 766	 * Swap the nodes around if that makes it simpler.
 767	 */
 768	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
 769	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
 770	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
 771			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
 772		tmpnode = node1;
 773		node1 = node2;
 774		node2 = tmpnode;
 775		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 776		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 777		btree1 = dp->d_ops->node_tree_p(node1);
 778		btree2 = dp->d_ops->node_tree_p(node2);
 779		swap = 1;
 780	}
 781
 782	count = (nodehdr1.count - nodehdr2.count) / 2;
 783	if (count == 0)
 784		return;
 785	tp = state->args->trans;
 786	/*
  787	 * Two cases: count > 0 moves entries from high in node1 to low in node2 (high-to-low), count < 0 moves them from low in node2 to high in node1 (low-to-high).
 788	 */
 789	if (count > 0) {
 790		/*
 791		 * Move elements in node2 up to make a hole.
 792		 */
 793		tmp = nodehdr2.count;
 794		if (tmp > 0) {
 795			tmp *= (uint)sizeof(xfs_da_node_entry_t);
 796			btree_s = &btree2[0];
 797			btree_d = &btree2[count];
 798			memmove(btree_d, btree_s, tmp);
 799		}
 800
 801		/*
 802		 * Move the req'd B-tree elements from high in node1 to
 803		 * low in node2.
 804		 */
 805		nodehdr2.count += count;
 806		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 807		btree_s = &btree1[nodehdr1.count - count];
 808		btree_d = &btree2[0];
 809		memcpy(btree_d, btree_s, tmp);
 810		nodehdr1.count -= count;
 811	} else {
 812		/*
 813		 * Move the req'd B-tree elements from low in node2 to
 814		 * high in node1.
 815		 */
 816		count = -count;
 817		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 818		btree_s = &btree2[0];
 819		btree_d = &btree1[nodehdr1.count];
 820		memcpy(btree_d, btree_s, tmp);
 821		nodehdr1.count += count;
 822
 823		xfs_trans_log_buf(tp, blk1->bp,
 824			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 825
 826		/*
 827		 * Move elements in node2 down to fill the hole.
 828		 */
 829		tmp  = nodehdr2.count - count;
 830		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 831		btree_s = &btree2[count];
 832		btree_d = &btree2[0];
 833		memmove(btree_d, btree_s, tmp);
 834		nodehdr2.count -= count;
 835	}
 836
 837	/*
 838	 * Log header of node 1 and all current bits of node 2.
 839	 */
 840	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
 841	xfs_trans_log_buf(tp, blk1->bp,
 842		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
 843
 844	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
 845	xfs_trans_log_buf(tp, blk2->bp,
 846		XFS_DA_LOGRANGE(node2, &node2->hdr,
 847				dp->d_ops->node_hdr_size +
 848				(sizeof(btree2[0]) * nodehdr2.count)));
 849
 850	/*
 851	 * Record the last hashval from each block for upward propagation.
 852	 * (note: don't use the swapped node pointers)
 853	 */
 854	if (swap) {
 855		node1 = blk1->bp->b_addr;
 856		node2 = blk2->bp->b_addr;
 857		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 858		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 859		btree1 = dp->d_ops->node_tree_p(node1);
 860		btree2 = dp->d_ops->node_tree_p(node2);
 861	}
 862	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
 863	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
 864
 865	/*
 866	 * Adjust the expected index for insertion.
 867	 */
 868	if (blk1->index >= nodehdr1.count) {
 869		blk2->index = blk1->index - nodehdr1.count;
 870		blk1->index = nodehdr1.count + 1;	/* make it invalid */
 871	}
 872}
 873
 874/*
 875 * Add a new entry to an intermediate node.
 876 */
 877STATIC void
 878xfs_da3_node_add(
 879	struct xfs_da_state	*state,
 880	struct xfs_da_state_blk	*oldblk,
 881	struct xfs_da_state_blk	*newblk)
 882{
 883	struct xfs_da_intnode	*node;
 884	struct xfs_da3_icnode_hdr nodehdr;
 885	struct xfs_da_node_entry *btree;
 886	int			tmp;
 887	struct xfs_inode	*dp = state->args->dp;
 888
 889	trace_xfs_da_node_add(state->args);
 890
 891	node = oldblk->bp->b_addr;
 892	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 893	btree = dp->d_ops->node_tree_p(node);
 894
 895	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
 896	ASSERT(newblk->blkno != 0);
 897	if (state->args->whichfork == XFS_DATA_FORK)
 898		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
 899		       newblk->blkno < state->args->geo->freeblk);
 900
 901	/*
 902	 * We may need to make some room before we insert the new node.
 903	 */
 904	tmp = 0;
 905	if (oldblk->index < nodehdr.count) {
 906		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
 907		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
 908	}
 909	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
 910	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
 911	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 912		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
 913				tmp + sizeof(*btree)));
 914
 915	nodehdr.count += 1;
 916	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 917	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 918		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 919
 920	/*
 921	 * Copy the last hash value from the oldblk to propagate upwards.
 922	 */
 923	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
 924}
 925
 926/*========================================================================
 927 * Routines used for shrinking the Btree.
 928 *========================================================================*/
 929
 930/*
 931 * Deallocate an empty leaf node, remove it from its parent,
 932 * possibly deallocating that block, etc...
 933 */
 934int
 935xfs_da3_join(
 936	struct xfs_da_state	*state)
 937{
 938	struct xfs_da_state_blk	*drop_blk;
 939	struct xfs_da_state_blk	*save_blk;
 940	int			action = 0;
 941	int			error;
 942
 943	trace_xfs_da_join(state->args);
 944
 945	drop_blk = &state->path.blk[ state->path.active-1 ];
 946	save_blk = &state->altpath.blk[ state->path.active-1 ];
 947	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
 948	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
 949	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
 950
 951	/*
 952	 * Walk back up the tree joining/deallocating as necessary.
 953	 * When we stop dropping blocks, break out.
 954	 */
 955	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
 956		 state->path.active--) {
 957		/*
 958		 * See if we can combine the block with a neighbor.
 959		 *   (action == 0) => no options, just leave
 960		 *   (action == 1) => coalesce, then unlink
 961		 *   (action == 2) => block empty, unlink it
 962		 */
 963		switch (drop_blk->magic) {
 964		case XFS_ATTR_LEAF_MAGIC:
 965			error = xfs_attr3_leaf_toosmall(state, &action);
 966			if (error)
 967				return error;
 968			if (action == 0)
 969				return 0;
 970			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
 971			break;
 972		case XFS_DIR2_LEAFN_MAGIC:
 973			error = xfs_dir2_leafn_toosmall(state, &action);
 974			if (error)
 975				return error;
 976			if (action == 0)
 977				return 0;
 978			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
 979			break;
 980		case XFS_DA_NODE_MAGIC:
 981			/*
 982			 * Remove the offending node, fixup hashvals,
 983			 * check for a toosmall neighbor.
 984			 */
 985			xfs_da3_node_remove(state, drop_blk);
 986			xfs_da3_fixhashpath(state, &state->path);
 987			error = xfs_da3_node_toosmall(state, &action);
 988			if (error)
 989				return error;
 990			if (action == 0)
 991				return 0;
 992			xfs_da3_node_unbalance(state, drop_blk, save_blk);
 993			break;
 994		}
 995		xfs_da3_fixhashpath(state, &state->altpath);
 996		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
 997		xfs_da_state_kill_altpath(state);
 998		if (error)
 999			return error;
1000		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1001							 drop_blk->bp);
1002		drop_blk->bp = NULL;
1003		if (error)
1004			return error;
1005	}
1006	/*
1007	 * We joined all the way to the top.  If it turns out that
1008	 * we only have one entry in the root, make the child block
1009	 * the new root.
1010	 */
1011	xfs_da3_node_remove(state, drop_blk);
1012	xfs_da3_fixhashpath(state, &state->path);
1013	error = xfs_da3_root_join(state, &state->path.blk[0]);
1014	return error;
1015}
1016
1017#ifdef	DEBUG
1018static void
1019xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1020{
1021	__be16	magic = blkinfo->magic;
1022
1023	if (level == 1) {
1024		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1025		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1026		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1027		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1028	} else {
1029		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1030		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1031	}
1032	ASSERT(!blkinfo->forw);
1033	ASSERT(!blkinfo->back);
1034}
1035#else	/* !DEBUG */
1036#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1037#endif	/* !DEBUG */
1038
1039/*
1040 * We have only one entry in the root.  Copy the only remaining child of
1041 * the old root to block 0 as the new root node.
1042 */
1043STATIC int
1044xfs_da3_root_join(
1045	struct xfs_da_state	*state,
1046	struct xfs_da_state_blk	*root_blk)
1047{
1048	struct xfs_da_intnode	*oldroot;
1049	struct xfs_da_args	*args;
1050	xfs_dablk_t		child;
1051	struct xfs_buf		*bp;
1052	struct xfs_da3_icnode_hdr oldroothdr;
1053	struct xfs_da_node_entry *btree;
1054	int			error;
1055	struct xfs_inode	*dp = state->args->dp;
1056
1057	trace_xfs_da_root_join(state->args);
1058
1059	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1060
1061	args = state->args;
1062	oldroot = root_blk->bp->b_addr;
1063	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
1064	ASSERT(oldroothdr.forw == 0);
1065	ASSERT(oldroothdr.back == 0);
1066
1067	/*
1068	 * If the root has more than one child, then don't do anything.
1069	 */
1070	if (oldroothdr.count > 1)
1071		return 0;
1072
1073	/*
1074	 * Read in the (only) child block, then copy those bytes into
1075	 * the root block's buffer and free the original child block.
1076	 */
1077	btree = dp->d_ops->node_tree_p(oldroot);
1078	child = be32_to_cpu(btree[0].before);
1079	ASSERT(child != 0);
1080	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1081					     args->whichfork);
1082	if (error)
1083		return error;
1084	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1085
1086	/*
1087	 * This could be copying a leaf back into the root block in the case of
1088	 * there only being a single leaf block left in the tree. Hence we have
1089	 * to update the b_ops pointer as well to match the buffer type change
1090	 * that could occur. For dir3 blocks we also need to update the block
1091	 * number in the buffer header.
1092	 */
1093	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1094	root_blk->bp->b_ops = bp->b_ops;
1095	xfs_trans_buf_copy_type(root_blk->bp, bp);
1096	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1097		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1098		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1099	}
1100	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1101			  args->geo->blksize - 1);
1102	error = xfs_da_shrink_inode(args, child, bp);
1103	return error;
1104}
1105
1106/*
1107 * Check a node block and its neighbors to see if the block should be
1108 * collapsed into one or the other neighbor.  Always keep the block
1109 * with the smaller block number.
1110 * If the current block is over 50% full, don't try to join it, return 0.
1111 * If the block is empty, fill in the state structure and return 2.
1112 * If it can be collapsed, fill in the state structure and return 1.
1113 * If nothing can be done, return 0.
1114 */
1115STATIC int
1116xfs_da3_node_toosmall(
1117	struct xfs_da_state	*state,
1118	int			*action)
1119{
1120	struct xfs_da_intnode	*node;
1121	struct xfs_da_state_blk	*blk;
1122	struct xfs_da_blkinfo	*info;
1123	xfs_dablk_t		blkno;
1124	struct xfs_buf		*bp;
1125	struct xfs_da3_icnode_hdr nodehdr;
1126	int			count;
1127	int			forward;
1128	int			error;
1129	int			retval;
1130	int			i;
1131	struct xfs_inode	*dp = state->args->dp;
1132
1133	trace_xfs_da_node_toosmall(state->args);
1134
1135	/*
1136	 * Check for the degenerate case of the block being over 50% full.
1137	 * If so, it's not worth even looking to see if we might be able
1138	 * to coalesce with a sibling.
1139	 */
1140	blk = &state->path.blk[ state->path.active-1 ];
1141	info = blk->bp->b_addr;
1142	node = (xfs_da_intnode_t *)info;
1143	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1144	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1145		*action = 0;	/* blk over 50%, don't try to join */
1146		return 0;	/* blk over 50%, don't try to join */
1147	}
1148
1149	/*
1150	 * Check for the degenerate case of the block being empty.
1151	 * If the block is empty, we'll simply delete it, no need to
1152	 * coalesce it with a sibling block.  We choose (arbitrarily)
1153	 * to merge with the forward block unless it is NULL.
1154	 */
1155	if (nodehdr.count == 0) {
1156		/*
1157		 * Make altpath point to the block we want to keep and
1158		 * path point to the block we want to drop (this one).
1159		 */
1160		forward = (info->forw != 0);
1161		memcpy(&state->altpath, &state->path, sizeof(state->path));
1162		error = xfs_da3_path_shift(state, &state->altpath, forward,
1163						 0, &retval);
1164		if (error)
1165			return error;
1166		if (retval) {
1167			*action = 0;
1168		} else {
1169			*action = 2;
1170		}
1171		return 0;
1172	}
1173
1174	/*
1175	 * Examine each sibling block to see if we can coalesce with
1176	 * at least 25% free space to spare.  We need to figure out
1177	 * whether to merge with the forward or the backward block.
1178	 * We prefer coalescing with the lower numbered sibling so as
1179	 * to shrink a directory over time.
1180	 */
1181	count  = state->args->geo->node_ents;
1182	count -= state->args->geo->node_ents >> 2;
1183	count -= nodehdr.count;
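	/*
	 * count is now the largest number of entries a sibling may hold if
	 * merging it into this block is to leave at least 25% free space.
	 */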
1184
1185	/* start with smaller blk num */
1186	forward = nodehdr.forw < nodehdr.back;
1187	for (i = 0; i < 2; forward = !forward, i++) {
1188		struct xfs_da3_icnode_hdr thdr;
1189		if (forward)
1190			blkno = nodehdr.forw;
1191		else
1192			blkno = nodehdr.back;
1193		if (blkno == 0)
1194			continue;
1195		error = xfs_da3_node_read(state->args->trans, dp,
1196					blkno, -1, &bp, state->args->whichfork);
1197		if (error)
1198			return error;
1199
1200		node = bp->b_addr;
1201		dp->d_ops->node_hdr_from_disk(&thdr, node);
1202		xfs_trans_brelse(state->args->trans, bp);
1203
1204		if (count - thdr.count >= 0)
1205			break;	/* fits with at least 25% to spare */
1206	}
1207	if (i >= 2) {
1208		*action = 0;
1209		return 0;
1210	}
1211
1212	/*
1213	 * Make altpath point to the block we want to keep (the lower
1214	 * numbered block) and path point to the block we want to drop.
1215	 */
1216	memcpy(&state->altpath, &state->path, sizeof(state->path));
1217	if (blkno < blk->blkno) {
1218		error = xfs_da3_path_shift(state, &state->altpath, forward,
1219						 0, &retval);
1220	} else {
1221		error = xfs_da3_path_shift(state, &state->path, forward,
1222						 0, &retval);
1223	}
1224	if (error)
1225		return error;
1226	if (retval) {
1227		*action = 0;
1228		return 0;
1229	}
1230	*action = 1;
1231	return 0;
1232}
1233
1234/*
1235 * Pick up the last hashvalue from an intermediate node.
1236 */
1237STATIC uint
1238xfs_da3_node_lasthash(
1239	struct xfs_inode	*dp,
1240	struct xfs_buf		*bp,
1241	int			*count)
1242{
1243	struct xfs_da_intnode	 *node;
1244	struct xfs_da_node_entry *btree;
1245	struct xfs_da3_icnode_hdr nodehdr;
1246
1247	node = bp->b_addr;
1248	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1249	if (count)
1250		*count = nodehdr.count;
1251	if (!nodehdr.count)
1252		return 0;
1253	btree = dp->d_ops->node_tree_p(node);
1254	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1255}
1256
1257/*
1258 * Walk back up the tree adjusting hash values as necessary,
1259 * when we stop making changes, return.
1260 */
1261void
1262xfs_da3_fixhashpath(
1263	struct xfs_da_state	*state,
1264	struct xfs_da_state_path *path)
1265{
1266	struct xfs_da_state_blk	*blk;
1267	struct xfs_da_intnode	*node;
1268	struct xfs_da_node_entry *btree;
1269	xfs_dahash_t		lasthash=0;
1270	int			level;
1271	int			count;
1272	struct xfs_inode	*dp = state->args->dp;
1273
1274	trace_xfs_da_fixhashpath(state->args);
1275
1276	level = path->active-1;
1277	blk = &path->blk[ level ];
1278	switch (blk->magic) {
1279	case XFS_ATTR_LEAF_MAGIC:
1280		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1281		if (count == 0)
1282			return;
1283		break;
1284	case XFS_DIR2_LEAFN_MAGIC:
1285		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
1286		if (count == 0)
1287			return;
1288		break;
1289	case XFS_DA_NODE_MAGIC:
1290		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1291		if (count == 0)
1292			return;
1293		break;
1294	}
1295	for (blk--, level--; level >= 0; blk--, level--) {
1296		struct xfs_da3_icnode_hdr nodehdr;
1297
1298		node = blk->bp->b_addr;
1299		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1300		btree = dp->d_ops->node_tree_p(node);
1301		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1302			break;
1303		blk->hashval = lasthash;
1304		btree[blk->index].hashval = cpu_to_be32(lasthash);
1305		xfs_trans_log_buf(state->args->trans, blk->bp,
1306				  XFS_DA_LOGRANGE(node, &btree[blk->index],
1307						  sizeof(*btree)));
1308
1309		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1310	}
1311}
1312
1313/*
1314 * Remove an entry from an intermediate node.
1315 */
1316STATIC void
1317xfs_da3_node_remove(
1318	struct xfs_da_state	*state,
1319	struct xfs_da_state_blk	*drop_blk)
1320{
1321	struct xfs_da_intnode	*node;
1322	struct xfs_da3_icnode_hdr nodehdr;
1323	struct xfs_da_node_entry *btree;
1324	int			index;
1325	int			tmp;
1326	struct xfs_inode	*dp = state->args->dp;
1327
1328	trace_xfs_da_node_remove(state->args);
1329
1330	node = drop_blk->bp->b_addr;
1331	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1332	ASSERT(drop_blk->index < nodehdr.count);
1333	ASSERT(drop_blk->index >= 0);
1334
1335	/*
1336	 * Copy over the offending entry, or just zero it out.
1337	 */
1338	index = drop_blk->index;
1339	btree = dp->d_ops->node_tree_p(node);
1340	if (index < nodehdr.count - 1) {
1341		tmp  = nodehdr.count - index - 1;
1342		tmp *= (uint)sizeof(xfs_da_node_entry_t);
1343		memmove(&btree[index], &btree[index + 1], tmp);
1344		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1345		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
1346		index = nodehdr.count - 1;
1347	}
1348	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1349	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1350	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1351	nodehdr.count -= 1;
1352	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
1353	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1354	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
1355
1356	/*
1357	 * Copy the last hash value from the block to propagate upwards.
1358	 */
1359	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1360}
1361
1362/*
1363 * Unbalance the elements between two intermediate nodes,
1364 * move all Btree elements from one node into another.
1365 */
1366STATIC void
1367xfs_da3_node_unbalance(
1368	struct xfs_da_state	*state,
1369	struct xfs_da_state_blk	*drop_blk,
1370	struct xfs_da_state_blk	*save_blk)
1371{
1372	struct xfs_da_intnode	*drop_node;
1373	struct xfs_da_intnode	*save_node;
1374	struct xfs_da_node_entry *drop_btree;
1375	struct xfs_da_node_entry *save_btree;
1376	struct xfs_da3_icnode_hdr drop_hdr;
1377	struct xfs_da3_icnode_hdr save_hdr;
1378	struct xfs_trans	*tp;
1379	int			sindex;
1380	int			tmp;
1381	struct xfs_inode	*dp = state->args->dp;
1382
1383	trace_xfs_da_node_unbalance(state->args);
1384
1385	drop_node = drop_blk->bp->b_addr;
1386	save_node = save_blk->bp->b_addr;
1387	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
1388	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
1389	drop_btree = dp->d_ops->node_tree_p(drop_node);
1390	save_btree = dp->d_ops->node_tree_p(save_node);
1391	tp = state->args->trans;
1392
1393	/*
1394	 * If the dying block has lower hashvals, then move all the
1395	 * elements in the remaining block up to make a hole.
1396	 */
1397	if ((be32_to_cpu(drop_btree[0].hashval) <
1398			be32_to_cpu(save_btree[0].hashval)) ||
1399	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1400			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1401		/* XXX: check this - is memmove dst correct? */
1402		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1403		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1404
1405		sindex = 0;
1406		xfs_trans_log_buf(tp, save_blk->bp,
1407			XFS_DA_LOGRANGE(save_node, &save_btree[0],
1408				(save_hdr.count + drop_hdr.count) *
1409						sizeof(xfs_da_node_entry_t)));
1410	} else {
1411		sindex = save_hdr.count;
1412		xfs_trans_log_buf(tp, save_blk->bp,
1413			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1414				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1415	}
1416
1417	/*
1418	 * Move all the B-tree elements from drop_blk to save_blk.
1419	 */
1420	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1421	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1422	save_hdr.count += drop_hdr.count;
1423
1424	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
1425	xfs_trans_log_buf(tp, save_blk->bp,
1426		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1427				dp->d_ops->node_hdr_size));
1428
1429	/*
1430	 * Save the last hashval in the remaining block for upward propagation.
1431	 */
1432	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1433}
1434
1435/*========================================================================
1436 * Routines used for finding things in the Btree.
1437 *========================================================================*/
1438
1439/*
1440 * Walk down the Btree looking for a particular filename, filling
1441 * in the state structure as we go.
1442 *
1443 * We will set the state structure to point to each of the elements
1444 * in each of the nodes where either the hashval is or should be.
1445 *
1446 * We support duplicate hashval's so for each entry in the current
1447 * node that could contain the desired hashval, descend.  This is a
1448 * pruned depth-first tree search.
1449 */
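/*
 * On success the lookup status from the leaf block (e.g. -ENOENT, -ENOATTR
 * or -EEXIST style values) is returned through *result, while the function
 * return value reports only real errors such as failed reads.
 */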
1450int							/* error */
1451xfs_da3_node_lookup_int(
1452	struct xfs_da_state	*state,
1453	int			*result)
1454{
1455	struct xfs_da_state_blk	*blk;
1456	struct xfs_da_blkinfo	*curr;
1457	struct xfs_da_intnode	*node;
1458	struct xfs_da_node_entry *btree;
1459	struct xfs_da3_icnode_hdr nodehdr;
1460	struct xfs_da_args	*args;
1461	xfs_dablk_t		blkno;
1462	xfs_dahash_t		hashval;
1463	xfs_dahash_t		btreehashval;
1464	int			probe;
1465	int			span;
1466	int			max;
1467	int			error;
1468	int			retval;
1469	struct xfs_inode	*dp = state->args->dp;
1470
1471	args = state->args;
1472
1473	/*
1474	 * Descend thru the B-tree searching each level for the right
1475	 * node to use, until the right hashval is found.
1476	 */
1477	blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
1478	for (blk = &state->path.blk[0], state->path.active = 1;
1479			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1480			 blk++, state->path.active++) {
1481		/*
1482		 * Read the next node down in the tree.
1483		 */
1484		blk->blkno = blkno;
1485		error = xfs_da3_node_read(args->trans, args->dp, blkno,
1486					-1, &blk->bp, args->whichfork);
1487		if (error) {
1488			blk->blkno = 0;
1489			state->path.active--;
1490			return error;
1491		}
1492		curr = blk->bp->b_addr;
1493		blk->magic = be16_to_cpu(curr->magic);
1494
1495		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1496		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1497			blk->magic = XFS_ATTR_LEAF_MAGIC;
1498			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1499			break;
1500		}
1501
1502		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1503		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1504			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1505			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1506							       blk->bp, NULL);
1507			break;
1508		}
1509
1510		blk->magic = XFS_DA_NODE_MAGIC;
1511
1512
1513		/*
1514		 * Search an intermediate node for a match.
1515		 */
1516		node = blk->bp->b_addr;
1517		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1518		btree = dp->d_ops->node_tree_p(node);
1519
1520		max = nodehdr.count;
1521		blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1522
1523		/*
1524		 * Binary search.  (note: small blocks will skip loop)
1525		 */
1526		probe = span = max / 2;
1527		hashval = args->hashval;
1528		while (span > 4) {
1529			span /= 2;
1530			btreehashval = be32_to_cpu(btree[probe].hashval);
1531			if (btreehashval < hashval)
1532				probe += span;
1533			else if (btreehashval > hashval)
1534				probe -= span;
1535			else
1536				break;
1537		}
1538		ASSERT((probe >= 0) && (probe < max));
1539		ASSERT((span <= 4) ||
1540			(be32_to_cpu(btree[probe].hashval) == hashval));
1541
1542		/*
1543		 * Since we may have duplicate hashval's, find the first
1544		 * matching hashval in the node.
1545		 */
1546		while (probe > 0 &&
1547		       be32_to_cpu(btree[probe].hashval) >= hashval) {
1548			probe--;
1549		}
1550		while (probe < max &&
1551		       be32_to_cpu(btree[probe].hashval) < hashval) {
1552			probe++;
1553		}
1554
1555		/*
1556		 * Pick the right block to descend on.
1557		 */
1558		if (probe == max) {
1559			blk->index = max - 1;
1560			blkno = be32_to_cpu(btree[max - 1].before);
1561		} else {
1562			blk->index = probe;
1563			blkno = be32_to_cpu(btree[probe].before);
1564		}
1565	}
1566
1567	/*
1568	 * A leaf block that ends in the hashval that we are interested in
1569	 * (final hashval == search hashval) means that the next block may
1570	 * contain more entries with the same hashval, shift upward to the
1571	 * next leaf and keep searching.
1572	 */
1573	for (;;) {
1574		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1575			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1576							&blk->index, state);
1577		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1578			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1579			blk->index = args->index;
1580			args->blkno = blk->blkno;
1581		} else {
1582			ASSERT(0);
1583			return -EFSCORRUPTED;
1584		}
1585		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1586		    (blk->hashval == args->hashval)) {
1587			error = xfs_da3_path_shift(state, &state->path, 1, 1,
1588							 &retval);
1589			if (error)
1590				return error;
1591			if (retval == 0) {
1592				continue;
1593			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1594				/* path_shift() gives ENOENT */
1595				retval = -ENOATTR;
1596			}
1597		}
1598		break;
1599	}
1600	*result = retval;
1601	return 0;
1602}
1603
1604/*========================================================================
1605 * Utility routines.
1606 *========================================================================*/
1607
1608/*
 1609 * Compare two intermediate nodes for "order": returns 1 if node2 sorts before node1 (i.e. node2 holds the lower hashvals), 0 otherwise.
1610 */
1611STATIC int
1612xfs_da3_node_order(
1613	struct xfs_inode *dp,
1614	struct xfs_buf	*node1_bp,
1615	struct xfs_buf	*node2_bp)
1616{
1617	struct xfs_da_intnode	*node1;
1618	struct xfs_da_intnode	*node2;
1619	struct xfs_da_node_entry *btree1;
1620	struct xfs_da_node_entry *btree2;
1621	struct xfs_da3_icnode_hdr node1hdr;
1622	struct xfs_da3_icnode_hdr node2hdr;
1623
1624	node1 = node1_bp->b_addr;
1625	node2 = node2_bp->b_addr;
1626	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
1627	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
1628	btree1 = dp->d_ops->node_tree_p(node1);
1629	btree2 = dp->d_ops->node_tree_p(node2);
1630
1631	if (node1hdr.count > 0 && node2hdr.count > 0 &&
1632	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1633	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1634	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1635		return 1;
1636	}
1637	return 0;
1638}
1639
1640/*
1641 * Link a new block into a doubly linked list of blocks (of whatever type).
1642 */
1643int							/* error */
1644xfs_da3_blk_link(
1645	struct xfs_da_state	*state,
1646	struct xfs_da_state_blk	*old_blk,
1647	struct xfs_da_state_blk	*new_blk)
1648{
1649	struct xfs_da_blkinfo	*old_info;
1650	struct xfs_da_blkinfo	*new_info;
1651	struct xfs_da_blkinfo	*tmp_info;
1652	struct xfs_da_args	*args;
1653	struct xfs_buf		*bp;
1654	int			before = 0;
1655	int			error;
1656	struct xfs_inode	*dp = state->args->dp;
1657
1658	/*
1659	 * Set up environment.
1660	 */
1661	args = state->args;
1662	ASSERT(args != NULL);
1663	old_info = old_blk->bp->b_addr;
1664	new_info = new_blk->bp->b_addr;
1665	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1666	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1667	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1668
1669	switch (old_blk->magic) {
1670	case XFS_ATTR_LEAF_MAGIC:
1671		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1672		break;
1673	case XFS_DIR2_LEAFN_MAGIC:
1674		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1675		break;
1676	case XFS_DA_NODE_MAGIC:
1677		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1678		break;
1679	}
1680
1681	/*
1682	 * Link blocks in appropriate order.
1683	 */
1684	if (before) {
1685		/*
1686		 * Link new block in before existing block.
1687		 */
1688		trace_xfs_da_link_before(args);
1689		new_info->forw = cpu_to_be32(old_blk->blkno);
1690		new_info->back = old_info->back;
1691		if (old_info->back) {
1692			error = xfs_da3_node_read(args->trans, dp,
1693						be32_to_cpu(old_info->back),
1694						-1, &bp, args->whichfork);
1695			if (error)
1696				return error;
1697			ASSERT(bp != NULL);
1698			tmp_info = bp->b_addr;
1699			ASSERT(tmp_info->magic == old_info->magic);
1700			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1701			tmp_info->forw = cpu_to_be32(new_blk->blkno);
1702			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1703		}
1704		old_info->back = cpu_to_be32(new_blk->blkno);
1705	} else {
1706		/*
1707		 * Link new block in after existing block.
1708		 */
1709		trace_xfs_da_link_after(args);
1710		new_info->forw = old_info->forw;
1711		new_info->back = cpu_to_be32(old_blk->blkno);
1712		if (old_info->forw) {
1713			error = xfs_da3_node_read(args->trans, dp,
1714						be32_to_cpu(old_info->forw),
1715						-1, &bp, args->whichfork);
1716			if (error)
1717				return error;
1718			ASSERT(bp != NULL);
1719			tmp_info = bp->b_addr;
1720			ASSERT(tmp_info->magic == old_info->magic);
1721			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1722			tmp_info->back = cpu_to_be32(new_blk->blkno);
1723			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1724		}
1725		old_info->forw = cpu_to_be32(new_blk->blkno);
1726	}
1727
1728	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1729	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1730	return 0;
1731}
1732
1733/*
1734 * Unlink a block from a doubly linked list of blocks.
1735 */
1736STATIC int						/* error */
1737xfs_da3_blk_unlink(
1738	struct xfs_da_state	*state,
1739	struct xfs_da_state_blk	*drop_blk,
1740	struct xfs_da_state_blk	*save_blk)
1741{
1742	struct xfs_da_blkinfo	*drop_info;
1743	struct xfs_da_blkinfo	*save_info;
1744	struct xfs_da_blkinfo	*tmp_info;
1745	struct xfs_da_args	*args;
1746	struct xfs_buf		*bp;
1747	int			error;
1748
1749	/*
1750	 * Set up environment.
1751	 */
1752	args = state->args;
1753	ASSERT(args != NULL);
1754	save_info = save_blk->bp->b_addr;
1755	drop_info = drop_blk->bp->b_addr;
1756	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1757	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1758	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1759	ASSERT(save_blk->magic == drop_blk->magic);
1760	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1761	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
1762	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1763	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
1764
1765	/*
1766	 * Unlink the leaf block from the doubly linked chain of leaves.
1767	 */
1768	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1769		trace_xfs_da_unlink_back(args);
1770		save_info->back = drop_info->back;
1771		if (drop_info->back) {
1772			error = xfs_da3_node_read(args->trans, args->dp,
1773						be32_to_cpu(drop_info->back),
1774						-1, &bp, args->whichfork);
1775			if (error)
1776				return error;
1777			ASSERT(bp != NULL);
1778			tmp_info = bp->b_addr;
1779			ASSERT(tmp_info->magic == save_info->magic);
1780			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1781			tmp_info->forw = cpu_to_be32(save_blk->blkno);
1782			xfs_trans_log_buf(args->trans, bp, 0,
1783						    sizeof(*tmp_info) - 1);
1784		}
1785	} else {
1786		trace_xfs_da_unlink_forward(args);
1787		save_info->forw = drop_info->forw;
1788		if (drop_info->forw) {
1789			error = xfs_da3_node_read(args->trans, args->dp,
1790						be32_to_cpu(drop_info->forw),
1791						-1, &bp, args->whichfork);
1792			if (error)
1793				return error;
1794			ASSERT(bp != NULL);
1795			tmp_info = bp->b_addr;
1796			ASSERT(tmp_info->magic == save_info->magic);
1797			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1798			tmp_info->back = cpu_to_be32(save_blk->blkno);
1799			xfs_trans_log_buf(args->trans, bp, 0,
1800						    sizeof(*tmp_info) - 1);
1801		}
1802	}
1803
1804	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1805	return 0;
1806}
1807
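/*
 * Editorial illustration (not part of this file): a minimal, standalone
 * sketch of the sibling fix-up that xfs_da3_blk_link()/xfs_da3_blk_unlink()
 * perform on the on-disk forw/back pointers above, restated against a plain
 * in-memory list so the order of the pointer updates is easy to follow.
 * All names below are invented for the example.
 */
struct example_blk {
	struct example_blk	*forw;	/* stands in for blkinfo->forw */
	struct example_blk	*back;	/* stands in for blkinfo->back */
};

/* Drop @drop from the chain, keeping its neighbor @save. */
static void example_blk_unlink(struct example_blk *save,
			       struct example_blk *drop)
{
	if (save->back == drop) {		/* drop sits behind save */
		save->back = drop->back;
		if (drop->back)
			drop->back->forw = save;
	} else {				/* drop sits ahead of save */
		save->forw = drop->forw;
		if (drop->forw)
			drop->forw->back = save;
	}
}
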
1808/*
1809 * Move a path "forward" or "!forward" one block at the current level.
1810 *
1811 * This routine will adjust a "path" to point to the next block
1812 * "forward" (higher hashvals) or "!forward" (lower hashvals) in the
1813 * Btree, including updating pointers to the intermediate nodes between
1814 * the new bottom and the root.
1815 */
1816int							/* error */
1817xfs_da3_path_shift(
1818	struct xfs_da_state	*state,
1819	struct xfs_da_state_path *path,
1820	int			forward,
1821	int			release,
1822	int			*result)
1823{
1824	struct xfs_da_state_blk	*blk;
1825	struct xfs_da_blkinfo	*info;
1826	struct xfs_da_intnode	*node;
1827	struct xfs_da_args	*args;
1828	struct xfs_da_node_entry *btree;
1829	struct xfs_da3_icnode_hdr nodehdr;
1830	struct xfs_buf		*bp;
1831	xfs_dablk_t		blkno = 0;
1832	int			level;
1833	int			error;
1834	struct xfs_inode	*dp = state->args->dp;
1835
1836	trace_xfs_da_path_shift(state->args);
1837
1838	/*
1839	 * Roll up the Btree looking for the first block where our
1840	 * current index is not at the edge of the block.  Note that
1841	 * we skip the bottom layer because we want the sibling block.
1842	 */
1843	args = state->args;
1844	ASSERT(args != NULL);
1845	ASSERT(path != NULL);
1846	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1847	level = (path->active-1) - 1;	/* skip bottom layer in path */
1848	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1849		node = blk->bp->b_addr;
1850		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1851		btree = dp->d_ops->node_tree_p(node);
1852
1853		if (forward && (blk->index < nodehdr.count - 1)) {
1854			blk->index++;
1855			blkno = be32_to_cpu(btree[blk->index].before);
1856			break;
1857		} else if (!forward && (blk->index > 0)) {
1858			blk->index--;
1859			blkno = be32_to_cpu(btree[blk->index].before);
1860			break;
1861		}
1862	}
1863	if (level < 0) {
1864		*result = -ENOENT;	/* we're out of our tree */
1865		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1866		return 0;
1867	}
1868
1869	/*
1870	 * Roll down the edge of the subtree until we reach the
1871	 * same depth we were at originally.
1872	 */
1873	for (blk++, level++; level < path->active; blk++, level++) {
1874		/*
1875		 * Read the next child block into a local buffer.
1876		 */
1877		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
1878					  args->whichfork);
1879		if (error)
1880			return error;
1881
1882		/*
1883		 * Release the old block (if it's dirty, the trans doesn't
1884		 * actually let go) and swap the local buffer into the path
1885		 * structure. This ensures failure of the above read doesn't set
1886		 * a NULL buffer in an active slot in the path.
1887		 */
1888		if (release)
1889			xfs_trans_brelse(args->trans, blk->bp);
1890		blk->blkno = blkno;
1891		blk->bp = bp;
1892
1893		info = blk->bp->b_addr;
1894		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1895		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1896		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1897		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1898		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1899		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1900
1901
1902		/*
1903		 * Note: we flatten the magic number to a single type so we
1904		 * don't have to compare against crc/non-crc types elsewhere.
1905		 */
1906		switch (be16_to_cpu(info->magic)) {
1907		case XFS_DA_NODE_MAGIC:
1908		case XFS_DA3_NODE_MAGIC:
1909			blk->magic = XFS_DA_NODE_MAGIC;
1910			node = (xfs_da_intnode_t *)info;
1911			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1912			btree = dp->d_ops->node_tree_p(node);
1913			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1914			if (forward)
1915				blk->index = 0;
1916			else
1917				blk->index = nodehdr.count - 1;
1918			blkno = be32_to_cpu(btree[blk->index].before);
1919			break;
1920		case XFS_ATTR_LEAF_MAGIC:
1921		case XFS_ATTR3_LEAF_MAGIC:
1922			blk->magic = XFS_ATTR_LEAF_MAGIC;
1923			ASSERT(level == path->active-1);
1924			blk->index = 0;
1925			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1926			break;
1927		case XFS_DIR2_LEAFN_MAGIC:
1928		case XFS_DIR3_LEAFN_MAGIC:
1929			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1930			ASSERT(level == path->active-1);
1931			blk->index = 0;
1932			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1933							       blk->bp, NULL);
1934			break;
1935		default:
1936			ASSERT(0);
1937			break;
1938		}
1939	}
1940	*result = 0;
1941	return 0;
1942}
1943
1944
1945/*========================================================================
1946 * Utility routines.
1947 *========================================================================*/
1948
1949/*
1950 * Implement a simple hash on a character string.
1951 * Rotate the hash value by 7 bits, then XOR each character in.
1952 * This is implemented with some source-level loop unrolling.
1953 */
1954xfs_dahash_t
1955xfs_da_hashname(const __uint8_t *name, int namelen)
1956{
1957	xfs_dahash_t hash;
1958
1959	/*
1960	 * Do four characters at a time as long as we can.
1961	 */
1962	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1963		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1964		       (name[3] << 0) ^ rol32(hash, 7 * 4);
1965
1966	/*
1967	 * Now do the rest of the characters.
1968	 */
1969	switch (namelen) {
1970	case 3:
1971		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1972		       rol32(hash, 7 * 3);
1973	case 2:
1974		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1975	case 1:
1976		return (name[0] << 0) ^ rol32(hash, 7 * 1);
1977	default: /* case 0: */
1978		return hash;
1979	}
1980}
1981
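/*
 * Editorial illustration (not part of this file): a standalone userspace
 * restatement of the rotate-by-7/XOR hash above, written one byte at a
 * time.  Because each input byte fits in 8 bits, rotating by 7 per byte
 * should produce the same values as the unrolled loop in xfs_da_hashname(),
 * but this is a sketch for experimentation, not the kernel implementation.
 * All example_* names are invented here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t example_rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

static uint32_t example_da_hash(const uint8_t *name, int namelen)
{
	uint32_t hash = 0;

	while (namelen-- > 0)
		hash = *name++ ^ example_rol32(hash, 7);
	return hash;
}

int main(void)
{
	const char *name = "lost+found";

	printf("hash(\"%s\") = 0x%08x\n", name,
	       example_da_hash((const uint8_t *)name, (int)strlen(name)));
	return 0;
}
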
1982enum xfs_dacmp
1983xfs_da_compname(
1984	struct xfs_da_args *args,
1985	const unsigned char *name,
1986	int		len)
1987{
1988	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1989					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1990}
1991
1992static xfs_dahash_t
1993xfs_default_hashname(
1994	struct xfs_name	*name)
1995{
1996	return xfs_da_hashname(name->name, name->len);
1997}
1998
1999const struct xfs_nameops xfs_default_nameops = {
2000	.hashname	= xfs_default_hashname,
2001	.compname	= xfs_da_compname
2002};
2003
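/*
 * Editorial illustration (not part of this file): xfs_nameops bundles the
 * hash and compare callbacks so directory code never hard-codes a naming
 * scheme.  The sketch below shows how a hypothetical case-insensitive
 * variant could plug into the same two hooks.  The example_* names are
 * invented and this is not the kernel's case-insensitive implementation;
 * it assumes tolower()/strncasecmp() from <linux/ctype.h>/<linux/string.h>
 * and that XFS_CMP_CASE denotes a case-insensitive-only match.
 */
static xfs_dahash_t
example_ci_hashname(
	struct xfs_name	*name)
{
	xfs_dahash_t	hash;
	int		i;

	for (i = 0, hash = 0; i < name->len; i++)
		hash = tolower(name->name[i]) ^ rol32(hash, 7);
	return hash;
}

static enum xfs_dacmp
example_ci_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	if (args->namelen != len)
		return XFS_CMP_DIFFERENT;
	if (strncasecmp((const char *)args->name, (const char *)name, len) != 0)
		return XFS_CMP_DIFFERENT;
	return memcmp(args->name, name, len) == 0 ?
					XFS_CMP_EXACT : XFS_CMP_CASE;
}

static const struct xfs_nameops example_ci_nameops = {
	.hashname	= example_ci_hashname,
	.compname	= example_ci_compname,
};
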
2004int
2005xfs_da_grow_inode_int(
2006	struct xfs_da_args	*args,
2007	xfs_fileoff_t		*bno,
2008	int			count)
2009{
2010	struct xfs_trans	*tp = args->trans;
2011	struct xfs_inode	*dp = args->dp;
2012	int			w = args->whichfork;
2013	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
2014	struct xfs_bmbt_irec	map, *mapp;
2015	int			nmap, error, got, i, mapi;
2016
2017	/*
2018	 * Find a spot in the file space to put the new block.
2019	 */
2020	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2021	if (error)
2022		return error;
2023
2024	/*
2025	 * Try mapping it in one filesystem block.
2026	 */
2027	nmap = 1;
2028	ASSERT(args->firstblock != NULL);
2029	error = xfs_bmapi_write(tp, dp, *bno, count,
2030			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2031			args->firstblock, args->total, &map, &nmap,
2032			args->dfops);
2033	if (error)
2034		return error;
2035
2036	ASSERT(nmap <= 1);
2037	if (nmap == 1) {
2038		mapp = &map;
2039		mapi = 1;
2040	} else if (nmap == 0 && count > 1) {
2041		xfs_fileoff_t		b;
2042		int			c;
2043
2044		/*
2045		 * If we didn't get it and the block might work if fragmented,
2046		 * try without the CONTIG flag.  Loop until we get it all.
2047		 */
2048		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2049		for (b = *bno, mapi = 0; b < *bno + count; ) {
2050			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2051			c = (int)(*bno + count - b);
2052			error = xfs_bmapi_write(tp, dp, b, c,
2053					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2054					args->firstblock, args->total,
2055					&mapp[mapi], &nmap, args->dfops);
2056			if (error)
2057				goto out_free_map;
2058			if (nmap < 1)
2059				break;
2060			mapi += nmap;
2061			b = mapp[mapi - 1].br_startoff +
2062			    mapp[mapi - 1].br_blockcount;
2063		}
2064	} else {
2065		mapi = 0;
2066		mapp = NULL;
2067	}
2068
2069	/*
2070	 * Count the blocks we got, make sure it matches the total.
2071	 */
2072	for (i = 0, got = 0; i < mapi; i++)
2073		got += mapp[i].br_blockcount;
2074	if (got != count || mapp[0].br_startoff != *bno ||
2075	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2076	    *bno + count) {
2077		error = -ENOSPC;
2078		goto out_free_map;
2079	}
2080
2081	/* account for newly allocated blocks in reserved blocks total */
2082	args->total -= dp->i_d.di_nblocks - nblks;
2083
2084out_free_map:
2085	if (mapp != &map)
2086		kmem_free(mapp);
2087	return error;
2088}
2089
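/*
 * Editorial illustration (not part of this file): the allocation strategy
 * used by xfs_da_grow_inode_int() above, restated standalone — ask for the
 * whole range as one contiguous chunk first, and only if that fails fall
 * back to covering [bno, bno + count) piece by piece.  example_alloc() is a
 * made-up allocator for the sketch; all names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend allocator: refuses contiguous requests bigger than two blocks. */
static int example_alloc(int want, bool contig)
{
	if (contig && want > 2)
		return 0;			/* "ENOSPC" for the contiguous try */
	return want > 2 ? 2 : want;		/* otherwise hand out up to two */
}

int main(void)
{
	int count = 5;
	int got = example_alloc(count, true);	/* first try: one contiguous run */

	while (got < count) {			/* fallback: fill the rest piecewise */
		int len = example_alloc(count - got, false);

		if (len == 0)
			break;			/* genuinely out of space */
		got += len;
	}
	printf("covered %d of %d blocks\n", got, count);
	return got == count ? 0 : 1;
}
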
2090/*
2091 * Add a block to the btree ahead of the file.
2092 * Return the new block number to the caller.
2093 */
2094int
2095xfs_da_grow_inode(
2096	struct xfs_da_args	*args,
2097	xfs_dablk_t		*new_blkno)
2098{
2099	xfs_fileoff_t		bno;
2100	int			error;
2101
2102	trace_xfs_da_grow_inode(args);
2103
2104	bno = args->geo->leafblk;
2105	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2106	if (!error)
2107		*new_blkno = (xfs_dablk_t)bno;
2108	return error;
2109}
2110
2111/*
2112 * Ick.  We need to always be able to remove a btree block, even
2113 * if there's no space reservation because the filesystem is full.
2114 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2115 * It swaps the target block with the last block in the file.  The
2116 * last block in the file can always be removed, since removing it
2117 * cannot cause a bmap btree split.
2118 */
2119STATIC int
2120xfs_da3_swap_lastblock(
2121	struct xfs_da_args	*args,
2122	xfs_dablk_t		*dead_blknop,
2123	struct xfs_buf		**dead_bufp)
2124{
2125	struct xfs_da_blkinfo	*dead_info;
2126	struct xfs_da_blkinfo	*sib_info;
2127	struct xfs_da_intnode	*par_node;
2128	struct xfs_da_intnode	*dead_node;
2129	struct xfs_dir2_leaf	*dead_leaf2;
2130	struct xfs_da_node_entry *btree;
2131	struct xfs_da3_icnode_hdr par_hdr;
2132	struct xfs_inode	*dp;
2133	struct xfs_trans	*tp;
2134	struct xfs_mount	*mp;
2135	struct xfs_buf		*dead_buf;
2136	struct xfs_buf		*last_buf;
2137	struct xfs_buf		*sib_buf;
2138	struct xfs_buf		*par_buf;
2139	xfs_dahash_t		dead_hash;
2140	xfs_fileoff_t		lastoff;
2141	xfs_dablk_t		dead_blkno;
2142	xfs_dablk_t		last_blkno;
2143	xfs_dablk_t		sib_blkno;
2144	xfs_dablk_t		par_blkno;
2145	int			error;
2146	int			w;
2147	int			entno;
2148	int			level;
2149	int			dead_level;
2150
2151	trace_xfs_da_swap_lastblock(args);
2152
2153	dead_buf = *dead_bufp;
2154	dead_blkno = *dead_blknop;
2155	tp = args->trans;
2156	dp = args->dp;
2157	w = args->whichfork;
2158	ASSERT(w == XFS_DATA_FORK);
2159	mp = dp->i_mount;
2160	lastoff = args->geo->freeblk;
2161	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2162	if (error)
2163		return error;
2164	if (unlikely(lastoff == 0)) {
2165		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2166				 mp);
2167		return -EFSCORRUPTED;
2168	}
2169	/*
2170	 * Read the last block in the btree space.
2171	 */
2172	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2173	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2174	if (error)
2175		return error;
2176	/*
2177	 * Copy the last block into the dead buffer and log it.
2178	 */
2179	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2180	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2181	dead_info = dead_buf->b_addr;
2182	/*
2183	 * Get values from the moved block.
2184	 */
2185	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2186	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2187		struct xfs_dir3_icleaf_hdr leafhdr;
2188		struct xfs_dir2_leaf_entry *ents;
2189
2190		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2191		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2192		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2193		dead_level = 0;
2194		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2195	} else {
2196		struct xfs_da3_icnode_hdr deadhdr;
2197
2198		dead_node = (xfs_da_intnode_t *)dead_info;
2199		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2200		btree = dp->d_ops->node_tree_p(dead_node);
2201		dead_level = deadhdr.level;
2202		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2203	}
2204	sib_buf = par_buf = NULL;
2205	/*
2206	 * If the moved block has a left sibling, fix up the pointers.
2207	 */
2208	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2209		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2210		if (error)
2211			goto done;
2212		sib_info = sib_buf->b_addr;
2213		if (unlikely(
2214		    be32_to_cpu(sib_info->forw) != last_blkno ||
2215		    sib_info->magic != dead_info->magic)) {
2216			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2217					 XFS_ERRLEVEL_LOW, mp);
2218			error = -EFSCORRUPTED;
2219			goto done;
2220		}
2221		sib_info->forw = cpu_to_be32(dead_blkno);
2222		xfs_trans_log_buf(tp, sib_buf,
2223			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2224					sizeof(sib_info->forw)));
2225		sib_buf = NULL;
2226	}
2227	/*
2228	 * If the moved block has a right sibling, fix up the pointers.
2229	 */
2230	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2231		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2232		if (error)
2233			goto done;
2234		sib_info = sib_buf->b_addr;
2235		if (unlikely(
2236		       be32_to_cpu(sib_info->back) != last_blkno ||
2237		       sib_info->magic != dead_info->magic)) {
2238			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2239					 XFS_ERRLEVEL_LOW, mp);
2240			error = -EFSCORRUPTED;
2241			goto done;
2242		}
2243		sib_info->back = cpu_to_be32(dead_blkno);
2244		xfs_trans_log_buf(tp, sib_buf,
2245			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2246					sizeof(sib_info->back)));
2247		sib_buf = NULL;
2248	}
2249	par_blkno = args->geo->leafblk;
2250	level = -1;
2251	/*
2252	 * Walk down the tree looking for the parent of the moved block.
2253	 */
2254	for (;;) {
2255		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2256		if (error)
2257			goto done;
2258		par_node = par_buf->b_addr;
2259		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2260		if (level >= 0 && level != par_hdr.level + 1) {
2261			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2262					 XFS_ERRLEVEL_LOW, mp);
2263			error = -EFSCORRUPTED;
2264			goto done;
2265		}
2266		level = par_hdr.level;
2267		btree = dp->d_ops->node_tree_p(par_node);
2268		for (entno = 0;
2269		     entno < par_hdr.count &&
2270		     be32_to_cpu(btree[entno].hashval) < dead_hash;
2271		     entno++)
2272			continue;
2273		if (entno == par_hdr.count) {
2274			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2275					 XFS_ERRLEVEL_LOW, mp);
2276			error = -EFSCORRUPTED;
2277			goto done;
2278		}
2279		par_blkno = be32_to_cpu(btree[entno].before);
2280		if (level == dead_level + 1)
2281			break;
2282		xfs_trans_brelse(tp, par_buf);
2283		par_buf = NULL;
2284	}
2285	/*
2286	 * We're in the right parent block.
2287	 * Look for the right entry.
2288	 */
2289	for (;;) {
2290		for (;
2291		     entno < par_hdr.count &&
2292		     be32_to_cpu(btree[entno].before) != last_blkno;
2293		     entno++)
2294			continue;
2295		if (entno < par_hdr.count)
2296			break;
2297		par_blkno = par_hdr.forw;
2298		xfs_trans_brelse(tp, par_buf);
2299		par_buf = NULL;
2300		if (unlikely(par_blkno == 0)) {
2301			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2302					 XFS_ERRLEVEL_LOW, mp);
2303			error = -EFSCORRUPTED;
2304			goto done;
2305		}
2306		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2307		if (error)
2308			goto done;
2309		par_node = par_buf->b_addr;
2310		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2311		if (par_hdr.level != level) {
2312			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2313					 XFS_ERRLEVEL_LOW, mp);
2314			error = -EFSCORRUPTED;
2315			goto done;
2316		}
2317		btree = dp->d_ops->node_tree_p(par_node);
2318		entno = 0;
2319	}
2320	/*
2321	 * Update the parent entry pointing to the moved block.
2322	 */
2323	btree[entno].before = cpu_to_be32(dead_blkno);
2324	xfs_trans_log_buf(tp, par_buf,
2325		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2326				sizeof(btree[entno].before)));
2327	*dead_blknop = last_blkno;
2328	*dead_bufp = last_buf;
2329	return 0;
2330done:
2331	if (par_buf)
2332		xfs_trans_brelse(tp, par_buf);
2333	if (sib_buf)
2334		xfs_trans_brelse(tp, sib_buf);
2335	xfs_trans_brelse(tp, last_buf);
2336	return error;
2337}
2338
2339/*
2340 * Remove a btree block from a directory or attribute.
2341 */
2342int
2343xfs_da_shrink_inode(
2344	xfs_da_args_t	*args,
2345	xfs_dablk_t	dead_blkno,
2346	struct xfs_buf	*dead_buf)
2347{
2348	xfs_inode_t *dp;
2349	int done, error, w, count;
2350	xfs_trans_t *tp;
2351
2352	trace_xfs_da_shrink_inode(args);
2353
2354	dp = args->dp;
2355	w = args->whichfork;
2356	tp = args->trans;
2357	count = args->geo->fsbcount;
2358	for (;;) {
2359		/*
2360		 * Remove extents.  If we get ENOSPC for a dir we have to move
2361		 * the last block to the place we want to kill.
2362		 */
2363		error = xfs_bunmapi(tp, dp, dead_blkno, count,
2364				    xfs_bmapi_aflag(w), 0, args->firstblock,
2365				    args->dfops, &done);
2366		if (error == -ENOSPC) {
2367			if (w != XFS_DATA_FORK)
2368				break;
2369			error = xfs_da3_swap_lastblock(args, &dead_blkno,
2370						      &dead_buf);
2371			if (error)
2372				break;
2373		} else {
2374			break;
2375		}
2376	}
2377	xfs_trans_binval(tp, dead_buf);
2378	return error;
2379}
2380
2381/*
2382 * See if the mapping(s) for this btree block are valid, i.e.
2383 * don't contain holes, are logically contiguous, and cover the whole range.
2384 */
2385STATIC int
2386xfs_da_map_covers_blocks(
2387	int		nmap,
2388	xfs_bmbt_irec_t	*mapp,
2389	xfs_dablk_t	bno,
2390	int		count)
2391{
2392	int		i;
2393	xfs_fileoff_t	off;
2394
2395	for (i = 0, off = bno; i < nmap; i++) {
2396		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2397		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
2398			return 0;
2399		}
2400		if (off != mapp[i].br_startoff) {
2401			return 0;
2402		}
2403		off += mapp[i].br_blockcount;
2404	}
2405	return off == bno + count;
2406}
2407
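/*
 * Editorial illustration (not part of this file): the coverage test above in
 * a standalone form, with a tiny example of a good mapping and one with a
 * gap.  The struct and names are invented stand-ins for the bmap fields
 * xfs_da_map_covers_blocks() actually inspects.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_extent {
	uint64_t	startoff;	/* logical offset of this mapping */
	uint64_t	blockcount;	/* length of this mapping */
	bool		is_hole;	/* stands in for HOLE/DELAY startblock */
};

/* Reject holes and gaps, and require the range to be fully covered. */
static bool example_covers(const struct example_extent *map, int nmap,
			   uint64_t bno, uint64_t count)
{
	uint64_t off = bno;
	int i;

	for (i = 0; i < nmap; i++) {
		if (map[i].is_hole || map[i].startoff != off)
			return false;
		off += map[i].blockcount;
	}
	return off == bno + count;
}

int main(void)
{
	struct example_extent contig[] = { { 36, 2, false }, { 38, 2, false } };
	struct example_extent gappy[]  = { { 36, 1, false }, { 38, 2, false } };

	printf("contig: %d, gappy: %d\n",
	       example_covers(contig, 2, 36, 4),	/* 1 */
	       example_covers(gappy, 2, 36, 4));	/* 0 */
	return 0;
}
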
2408/*
2409 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2410 *
2411 * For the single map case, it is assumed that the caller has provided a pointer
2412 * to a valid xfs_buf_map.  For the multiple map case, this function will
2413 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2414 * map pointer with the allocated map.
2415 */
2416static int
2417xfs_buf_map_from_irec(
2418	struct xfs_mount	*mp,
2419	struct xfs_buf_map	**mapp,
2420	int			*nmaps,
2421	struct xfs_bmbt_irec	*irecs,
2422	int			nirecs)
2423{
2424	struct xfs_buf_map	*map;
2425	int			i;
2426
2427	ASSERT(*nmaps == 1);
2428	ASSERT(nirecs >= 1);
2429
2430	if (nirecs > 1) {
2431		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2432				  KM_SLEEP | KM_NOFS);
2433		if (!map)
2434			return -ENOMEM;
2435		*mapp = map;
2436	}
2437
2438	*nmaps = nirecs;
2439	map = *mapp;
2440	for (i = 0; i < *nmaps; i++) {
2441		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2442		       irecs[i].br_startblock != HOLESTARTBLOCK);
2443		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2444		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2445	}
2446	return 0;
2447}
2448
2449/*
2450 * Map the block we are given, ready for reading. There are three possible return
2451 * values:
2452 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
2453 *	     caller knows not to execute a subsequent read.
2454 *	 0 - if we mapped the block successfully
2455 *	<0 - a negative errno if there was an error.
2456 */
2457static int
2458xfs_dabuf_map(
2459	struct xfs_inode	*dp,
2460	xfs_dablk_t		bno,
2461	xfs_daddr_t		mappedbno,
2462	int			whichfork,
2463	struct xfs_buf_map	**map,
2464	int			*nmaps)
2465{
2466	struct xfs_mount	*mp = dp->i_mount;
2467	int			nfsb;
2468	int			error = 0;
2469	struct xfs_bmbt_irec	irec;
2470	struct xfs_bmbt_irec	*irecs = &irec;
2471	int			nirecs;
2472
2473	ASSERT(map && *map);
2474	ASSERT(*nmaps == 1);
2475
2476	if (whichfork == XFS_DATA_FORK)
2477		nfsb = mp->m_dir_geo->fsbcount;
2478	else
2479		nfsb = mp->m_attr_geo->fsbcount;
2480
2481	/*
2482	 * Caller doesn't have a mapping.  -2 means don't complain
2483	 * if we land in a hole.
2484	 */
2485	if (mappedbno == -1 || mappedbno == -2) {
2486		/*
2487		 * Optimize the one-block case.
2488		 */
2489		if (nfsb != 1)
2490			irecs = kmem_zalloc(sizeof(irec) * nfsb,
2491					    KM_SLEEP | KM_NOFS);
2492
2493		nirecs = nfsb;
2494		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2495				       &nirecs, xfs_bmapi_aflag(whichfork));
2496		if (error)
2497			goto out;
2498	} else {
2499		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2500		irecs->br_startoff = (xfs_fileoff_t)bno;
2501		irecs->br_blockcount = nfsb;
2502		irecs->br_state = 0;
2503		nirecs = 1;
2504	}
2505
2506	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2507		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
2508		if (unlikely(error == -EFSCORRUPTED)) {
2509			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2510				int i;
2511				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2512					__func__, (long long)bno,
2513					(long long)dp->i_ino);
2514				for (i = 0; i < *nmaps; i++) {
2515					xfs_alert(mp,
2516"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2517						i,
2518						(long long)irecs[i].br_startoff,
2519						(long long)irecs[i].br_startblock,
2520						(long long)irecs[i].br_blockcount,
2521						irecs[i].br_state);
2522				}
2523			}
2524			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2525					 XFS_ERRLEVEL_LOW, mp);
2526		}
2527		goto out;
2528	}
2529	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2530out:
2531	if (irecs != &irec)
2532		kmem_free(irecs);
2533	return error;
2534}
2535
2536/*
2537 * Get a buffer for the dir/attr block.
2538 */
2539int
2540xfs_da_get_buf(
2541	struct xfs_trans	*trans,
2542	struct xfs_inode	*dp,
2543	xfs_dablk_t		bno,
2544	xfs_daddr_t		mappedbno,
2545	struct xfs_buf		**bpp,
2546	int			whichfork)
2547{
2548	struct xfs_buf		*bp;
2549	struct xfs_buf_map	map;
2550	struct xfs_buf_map	*mapp;
2551	int			nmap;
2552	int			error;
2553
2554	*bpp = NULL;
2555	mapp = &map;
2556	nmap = 1;
2557	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2558				&mapp, &nmap);
2559	if (error) {
2560		/* mapping a hole is not an error, but we don't continue */
2561		if (error == -1)
2562			error = 0;
2563		goto out_free;
2564	}
2565
2566	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2567				    mapp, nmap, 0);
2568	error = bp ? bp->b_error : -EIO;
2569	if (error) {
2570		if (bp)
2571			xfs_trans_brelse(trans, bp);
2572		goto out_free;
2573	}
2574
2575	*bpp = bp;
2576
2577out_free:
2578	if (mapp != &map)
2579		kmem_free(mapp);
2580
2581	return error;
2582}
2583
2584/*
2585 * Get a buffer for the dir/attr block, fill in the contents.
2586 */
2587int
2588xfs_da_read_buf(
2589	struct xfs_trans	*trans,
2590	struct xfs_inode	*dp,
2591	xfs_dablk_t		bno,
2592	xfs_daddr_t		mappedbno,
2593	struct xfs_buf		**bpp,
2594	int			whichfork,
2595	const struct xfs_buf_ops *ops)
2596{
2597	struct xfs_buf		*bp;
2598	struct xfs_buf_map	map;
2599	struct xfs_buf_map	*mapp;
2600	int			nmap;
2601	int			error;
2602
2603	*bpp = NULL;
2604	mapp = &map;
2605	nmap = 1;
2606	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2607				&mapp, &nmap);
2608	if (error) {
2609		/* mapping a hole is not an error, but we don't continue */
2610		if (error == -1)
2611			error = 0;
2612		goto out_free;
2613	}
2614
2615	error = xfs_trans_read_buf_map(dp->i_mount, trans,
2616					dp->i_mount->m_ddev_targp,
2617					mapp, nmap, 0, &bp, ops);
2618	if (error)
2619		goto out_free;
2620
2621	if (whichfork == XFS_ATTR_FORK)
2622		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2623	else
2624		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2625	*bpp = bp;
2626out_free:
2627	if (mapp != &map)
2628		kmem_free(mapp);
2629
2630	return error;
2631}
2632
2633/*
2634 * Readahead the dir/attr block.
2635 */
2636int
2637xfs_da_reada_buf(
2638	struct xfs_inode	*dp,
2639	xfs_dablk_t		bno,
2640	xfs_daddr_t		mappedbno,
2641	int			whichfork,
2642	const struct xfs_buf_ops *ops)
2643{
2644	struct xfs_buf_map	map;
2645	struct xfs_buf_map	*mapp;
2646	int			nmap;
2647	int			error;
2648
2649	mapp = &map;
2650	nmap = 1;
2651	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2652				&mapp, &nmap);
2653	if (error) {
2654		/* mapping a hole is not an error, but we don't continue */
2655		if (error == -1)
2656			error = 0;
2657		goto out_free;
2658	}
2659
2660	mappedbno = mapp[0].bm_bn;
2661	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2662
2663out_free:
2664	if (mapp != &map)
2665		kmem_free(mapp);
2666
2667	return error;
2668}
v4.6
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * Copyright (c) 2013 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_bit.h"
  26#include "xfs_mount.h"
  27#include "xfs_da_format.h"
  28#include "xfs_da_btree.h"
  29#include "xfs_dir2.h"
  30#include "xfs_dir2_priv.h"
  31#include "xfs_inode.h"
  32#include "xfs_trans.h"
  33#include "xfs_inode_item.h"
  34#include "xfs_alloc.h"
  35#include "xfs_bmap.h"
  36#include "xfs_attr.h"
  37#include "xfs_attr_leaf.h"
  38#include "xfs_error.h"
  39#include "xfs_trace.h"
  40#include "xfs_cksum.h"
  41#include "xfs_buf_item.h"
  42#include "xfs_log.h"
  43
  44/*
  45 * xfs_da_btree.c
  46 *
  47 * Routines to implement directories as Btrees of hashed names.
  48 */
  49
  50/*========================================================================
  51 * Function prototypes for the kernel.
  52 *========================================================================*/
  53
  54/*
  55 * Routines used for growing the Btree.
  56 */
  57STATIC int xfs_da3_root_split(xfs_da_state_t *state,
  58					    xfs_da_state_blk_t *existing_root,
  59					    xfs_da_state_blk_t *new_child);
  60STATIC int xfs_da3_node_split(xfs_da_state_t *state,
  61					    xfs_da_state_blk_t *existing_blk,
  62					    xfs_da_state_blk_t *split_blk,
  63					    xfs_da_state_blk_t *blk_to_add,
  64					    int treelevel,
  65					    int *result);
  66STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
  67					 xfs_da_state_blk_t *node_blk_1,
  68					 xfs_da_state_blk_t *node_blk_2);
  69STATIC void xfs_da3_node_add(xfs_da_state_t *state,
  70				   xfs_da_state_blk_t *old_node_blk,
  71				   xfs_da_state_blk_t *new_node_blk);
  72
  73/*
  74 * Routines used for shrinking the Btree.
  75 */
  76STATIC int xfs_da3_root_join(xfs_da_state_t *state,
  77					   xfs_da_state_blk_t *root_blk);
  78STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
  79STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
  80					      xfs_da_state_blk_t *drop_blk);
  81STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
  82					 xfs_da_state_blk_t *src_node_blk,
  83					 xfs_da_state_blk_t *dst_node_blk);
  84
  85/*
  86 * Utility routines.
  87 */
  88STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
  89				  xfs_da_state_blk_t *drop_blk,
  90				  xfs_da_state_blk_t *save_blk);
  91
  92
  93kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
  94
  95/*
  96 * Allocate a dir-state structure.
  97 * We don't put them on the stack since they're large.
  98 */
  99xfs_da_state_t *
 100xfs_da_state_alloc(void)
 101{
 102	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
 103}
 104
 105/*
 106 * Kill the altpath contents of a da-state structure.
 107 */
 108STATIC void
 109xfs_da_state_kill_altpath(xfs_da_state_t *state)
 110{
 111	int	i;
 112
 113	for (i = 0; i < state->altpath.active; i++)
 114		state->altpath.blk[i].bp = NULL;
 115	state->altpath.active = 0;
 116}
 117
 118/*
 119 * Free a da-state structure.
 120 */
 121void
 122xfs_da_state_free(xfs_da_state_t *state)
 123{
 124	xfs_da_state_kill_altpath(state);
 125#ifdef DEBUG
 126	memset((char *)state, 0, sizeof(*state));
 127#endif /* DEBUG */
 128	kmem_zone_free(xfs_da_state_zone, state);
 129}
 130
 131static bool
 132xfs_da3_node_verify(
 133	struct xfs_buf		*bp)
 134{
 135	struct xfs_mount	*mp = bp->b_target->bt_mount;
 136	struct xfs_da_intnode	*hdr = bp->b_addr;
 137	struct xfs_da3_icnode_hdr ichdr;
 138	const struct xfs_dir_ops *ops;
 139
 140	ops = xfs_dir_get_ops(mp, NULL);
 141
 142	ops->node_hdr_from_disk(&ichdr, hdr);
 143
 144	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 145		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 146
 147		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
 148			return false;
 149
 150		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
 151			return false;
 152		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
 153			return false;
 154		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
 155			return false;
 156	} else {
 157		if (ichdr.magic != XFS_DA_NODE_MAGIC)
 158			return false;
 159	}
 160	if (ichdr.level == 0)
 161		return false;
 162	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
 163		return false;
 164	if (ichdr.count == 0)
 165		return false;
 166
 167	/*
 168	 * we don't know if the node is for and attribute or directory tree,
 169	 * so only fail if the count is outside both bounds
 170	 */
 171	if (ichdr.count > mp->m_dir_geo->node_ents &&
 172	    ichdr.count > mp->m_attr_geo->node_ents)
 173		return false;
 174
 175	/* XXX: hash order check? */
 176
 177	return true;
 178}
 179
 180static void
 181xfs_da3_node_write_verify(
 182	struct xfs_buf	*bp)
 183{
 184	struct xfs_mount	*mp = bp->b_target->bt_mount;
 185	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 186	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 187
 188	if (!xfs_da3_node_verify(bp)) {
 189		xfs_buf_ioerror(bp, -EFSCORRUPTED);
 190		xfs_verifier_error(bp);
 191		return;
 192	}
 193
 194	if (!xfs_sb_version_hascrc(&mp->m_sb))
 195		return;
 196
 197	if (bip)
 198		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
 199
 200	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
 201}
 202
 203/*
 204 * leaf/node format detection on trees is sketchy, so a node read can be done on
 205 * leaf level blocks when detection identifies the tree as a node format tree
 206 * incorrectly. In this case, we need to swap the verifier to match the correct
 207 * format of the block being read.
 208 */
 209static void
 210xfs_da3_node_read_verify(
 211	struct xfs_buf		*bp)
 212{
 213	struct xfs_da_blkinfo	*info = bp->b_addr;
 214
 215	switch (be16_to_cpu(info->magic)) {
 216		case XFS_DA3_NODE_MAGIC:
 217			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
 218				xfs_buf_ioerror(bp, -EFSBADCRC);
 219				break;
 220			}
 221			/* fall through */
 222		case XFS_DA_NODE_MAGIC:
 223			if (!xfs_da3_node_verify(bp)) {
 224				xfs_buf_ioerror(bp, -EFSCORRUPTED);
 225				break;
 226			}
 227			return;
 228		case XFS_ATTR_LEAF_MAGIC:
 229		case XFS_ATTR3_LEAF_MAGIC:
 230			bp->b_ops = &xfs_attr3_leaf_buf_ops;
 231			bp->b_ops->verify_read(bp);
 232			return;
 233		case XFS_DIR2_LEAFN_MAGIC:
 234		case XFS_DIR3_LEAFN_MAGIC:
 235			bp->b_ops = &xfs_dir3_leafn_buf_ops;
 236			bp->b_ops->verify_read(bp);
 237			return;
 238		default:
 239			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 240			break;
 241	}
 242
 243	/* corrupt block */
 244	xfs_verifier_error(bp);
 245}
 246
 247const struct xfs_buf_ops xfs_da3_node_buf_ops = {
 248	.name = "xfs_da3_node",
 249	.verify_read = xfs_da3_node_read_verify,
 250	.verify_write = xfs_da3_node_write_verify,
 251};
 252
 253int
 254xfs_da3_node_read(
 255	struct xfs_trans	*tp,
 256	struct xfs_inode	*dp,
 257	xfs_dablk_t		bno,
 258	xfs_daddr_t		mappedbno,
 259	struct xfs_buf		**bpp,
 260	int			which_fork)
 261{
 262	int			err;
 263
 264	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
 265					which_fork, &xfs_da3_node_buf_ops);
 266	if (!err && tp) {
 267		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
 268		int			type;
 269
 270		switch (be16_to_cpu(info->magic)) {
 271		case XFS_DA_NODE_MAGIC:
 272		case XFS_DA3_NODE_MAGIC:
 273			type = XFS_BLFT_DA_NODE_BUF;
 274			break;
 275		case XFS_ATTR_LEAF_MAGIC:
 276		case XFS_ATTR3_LEAF_MAGIC:
 277			type = XFS_BLFT_ATTR_LEAF_BUF;
 278			break;
 279		case XFS_DIR2_LEAFN_MAGIC:
 280		case XFS_DIR3_LEAFN_MAGIC:
 281			type = XFS_BLFT_DIR_LEAFN_BUF;
 282			break;
 283		default:
 284			type = 0;
 285			ASSERT(0);
 286			break;
 287		}
 288		xfs_trans_buf_set_type(tp, *bpp, type);
 289	}
 290	return err;
 291}
 292
 293/*========================================================================
 294 * Routines used for growing the Btree.
 295 *========================================================================*/
 296
 297/*
 298 * Create the initial contents of an intermediate node.
 299 */
 300int
 301xfs_da3_node_create(
 302	struct xfs_da_args	*args,
 303	xfs_dablk_t		blkno,
 304	int			level,
 305	struct xfs_buf		**bpp,
 306	int			whichfork)
 307{
 308	struct xfs_da_intnode	*node;
 309	struct xfs_trans	*tp = args->trans;
 310	struct xfs_mount	*mp = tp->t_mountp;
 311	struct xfs_da3_icnode_hdr ichdr = {0};
 312	struct xfs_buf		*bp;
 313	int			error;
 314	struct xfs_inode	*dp = args->dp;
 315
 316	trace_xfs_da_node_create(args);
 317	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
 318
 319	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 320	if (error)
 321		return error;
 322	bp->b_ops = &xfs_da3_node_buf_ops;
 323	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 324	node = bp->b_addr;
 325
 326	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 327		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 328
 329		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
 330		ichdr.magic = XFS_DA3_NODE_MAGIC;
 331		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
 332		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
 333		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
 334	} else {
 335		ichdr.magic = XFS_DA_NODE_MAGIC;
 336	}
 337	ichdr.level = level;
 338
 339	dp->d_ops->node_hdr_to_disk(node, &ichdr);
 340	xfs_trans_log_buf(tp, bp,
 341		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 342
 343	*bpp = bp;
 344	return 0;
 345}
 346
 347/*
 348 * Split a leaf node, rebalance, then possibly split
 349 * intermediate nodes, rebalance, etc.
 350 */
 351int							/* error */
 352xfs_da3_split(
 353	struct xfs_da_state	*state)
 354{
 355	struct xfs_da_state_blk	*oldblk;
 356	struct xfs_da_state_blk	*newblk;
 357	struct xfs_da_state_blk	*addblk;
 358	struct xfs_da_intnode	*node;
 359	struct xfs_buf		*bp;
 360	int			max;
 361	int			action = 0;
 362	int			error;
 363	int			i;
 364
 365	trace_xfs_da_split(state->args);
 366
 367	/*
 368	 * Walk back up the tree splitting/inserting/adjusting as necessary.
 369	 * If we need to insert and there isn't room, split the node, then
 370	 * decide which fragment to insert the new block from below into.
 371	 * Note that we may split the root this way, but we need more fixup.
 372	 */
 373	max = state->path.active - 1;
 374	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
 375	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
 376	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
 377
 378	addblk = &state->path.blk[max];		/* initial dummy value */
 379	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
 380		oldblk = &state->path.blk[i];
 381		newblk = &state->altpath.blk[i];
 382
 383		/*
 384		 * If a leaf node then
 385		 *     Allocate a new leaf node, then rebalance across them.
 386		 * else if an intermediate node then
 387		 *     We split on the last layer, must we split the node?
 388		 */
 389		switch (oldblk->magic) {
 390		case XFS_ATTR_LEAF_MAGIC:
 391			error = xfs_attr3_leaf_split(state, oldblk, newblk);
 392			if ((error != 0) && (error != -ENOSPC)) {
 393				return error;	/* GROT: attr is inconsistent */
 394			}
 395			if (!error) {
 396				addblk = newblk;
 397				break;
 398			}
 399			/*
 400			 * Entry wouldn't fit, split the leaf again.
 
 
 401			 */
 402			state->extravalid = 1;
 403			if (state->inleaf) {
 404				state->extraafter = 0;	/* before newblk */
 405				trace_xfs_attr_leaf_split_before(state->args);
 406				error = xfs_attr3_leaf_split(state, oldblk,
 407							    &state->extrablk);
 408			} else {
 409				state->extraafter = 1;	/* after newblk */
 410				trace_xfs_attr_leaf_split_after(state->args);
 411				error = xfs_attr3_leaf_split(state, newblk,
 412							    &state->extrablk);
 413			}
 414			if (error)
 415				return error;	/* GROT: attr inconsistent */
 416			addblk = newblk;
 417			break;
 418		case XFS_DIR2_LEAFN_MAGIC:
 419			error = xfs_dir2_leafn_split(state, oldblk, newblk);
 420			if (error)
 421				return error;
 422			addblk = newblk;
 423			break;
 424		case XFS_DA_NODE_MAGIC:
 425			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
 426							 max - i, &action);
 427			addblk->bp = NULL;
 428			if (error)
 429				return error;	/* GROT: dir is inconsistent */
 430			/*
 431			 * Record the newly split block for the next time thru?
 432			 */
 433			if (action)
 434				addblk = newblk;
 435			else
 436				addblk = NULL;
 437			break;
 438		}
 439
 440		/*
 441		 * Update the btree to show the new hashval for this child.
 442		 */
 443		xfs_da3_fixhashpath(state, &state->path);
 444	}
 445	if (!addblk)
 446		return 0;
 447
 448	/*
 
 
 
 
 
 
 
 
 449	 * Split the root node.
 450	 */
 451	ASSERT(state->path.active == 0);
 452	oldblk = &state->path.blk[0];
 453	error = xfs_da3_root_split(state, oldblk, addblk);
 454	if (error) {
 455		addblk->bp = NULL;
 456		return error;	/* GROT: dir is inconsistent */
 457	}
 458
 459	/*
 460	 * Update pointers to the node which used to be block 0 and
 461	 * just got bumped because of the addition of a new root node.
 462	 * There might be three blocks involved if a double split occurred,
 463	 * and the original block 0 could be at any position in the list.
 464	 *
 465	 * Note: the magic numbers and sibling pointers are in the same
 466	 * physical place for both v2 and v3 headers (by design). Hence it
 467	 * doesn't matter which version of the xfs_da_intnode structure we use
 468	 * here as the result will be the same using either structure.
 469	 */
 470	node = oldblk->bp->b_addr;
 471	if (node->hdr.info.forw) {
 472		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
 473			bp = addblk->bp;
 474		} else {
 475			ASSERT(state->extravalid);
 476			bp = state->extrablk.bp;
 477		}
 478		node = bp->b_addr;
 479		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 480		xfs_trans_log_buf(state->args->trans, bp,
 481		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 482		    sizeof(node->hdr.info)));
 483	}
 484	node = oldblk->bp->b_addr;
 485	if (node->hdr.info.back) {
 486		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
 487			bp = addblk->bp;
 488		} else {
 489			ASSERT(state->extravalid);
 490			bp = state->extrablk.bp;
 491		}
 492		node = bp->b_addr;
 493		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 494		xfs_trans_log_buf(state->args->trans, bp,
 495		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 496		    sizeof(node->hdr.info)));
 497	}
 498	addblk->bp = NULL;
 499	return 0;
 500}
 501
 502/*
 503 * Split the root.  We have to create a new root and point to the two
 504 * parts (the split old root) that we just created.  Copy block zero to
 505 * the EOF, extending the inode in process.
 506 */
 507STATIC int						/* error */
 508xfs_da3_root_split(
 509	struct xfs_da_state	*state,
 510	struct xfs_da_state_blk	*blk1,
 511	struct xfs_da_state_blk	*blk2)
 512{
 513	struct xfs_da_intnode	*node;
 514	struct xfs_da_intnode	*oldroot;
 515	struct xfs_da_node_entry *btree;
 516	struct xfs_da3_icnode_hdr nodehdr;
 517	struct xfs_da_args	*args;
 518	struct xfs_buf		*bp;
 519	struct xfs_inode	*dp;
 520	struct xfs_trans	*tp;
 521	struct xfs_dir2_leaf	*leaf;
 522	xfs_dablk_t		blkno;
 523	int			level;
 524	int			error;
 525	int			size;
 526
 527	trace_xfs_da_root_split(state->args);
 528
 529	/*
 530	 * Copy the existing (incorrect) block from the root node position
 531	 * to a free space somewhere.
 532	 */
 533	args = state->args;
 534	error = xfs_da_grow_inode(args, &blkno);
 535	if (error)
 536		return error;
 537
 538	dp = args->dp;
 539	tp = args->trans;
 540	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
 541	if (error)
 542		return error;
 543	node = bp->b_addr;
 544	oldroot = blk1->bp->b_addr;
 545	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 546	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
 547		struct xfs_da3_icnode_hdr icnodehdr;
 548
 549		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
 550		btree = dp->d_ops->node_tree_p(oldroot);
 551		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
 552		level = icnodehdr.level;
 553
 554		/*
 555		 * we are about to copy oldroot to bp, so set up the type
 556		 * of bp while we know exactly what it will be.
 557		 */
 558		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 559	} else {
 560		struct xfs_dir3_icleaf_hdr leafhdr;
 561		struct xfs_dir2_leaf_entry *ents;
 562
 563		leaf = (xfs_dir2_leaf_t *)oldroot;
 564		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 565		ents = dp->d_ops->leaf_ents_p(leaf);
 566
 567		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 568		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
 569		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
 570		level = 0;
 571
 572		/*
 573		 * we are about to copy oldroot to bp, so set up the type
 574		 * of bp while we know exactly what it will be.
 575		 */
 576		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
 577	}
 578
 579	/*
 580	 * we can copy most of the information in the node from one block to
 581	 * another, but for CRC enabled headers we have to make sure that the
 582	 * block specific identifiers are kept intact. We update the buffer
 583	 * directly for this.
 584	 */
 585	memcpy(node, oldroot, size);
 586	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
 587	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 588		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
 589
 590		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
 591	}
 592	xfs_trans_log_buf(tp, bp, 0, size - 1);
 593
 594	bp->b_ops = blk1->bp->b_ops;
 595	xfs_trans_buf_copy_type(bp, blk1->bp);
 596	blk1->bp = bp;
 597	blk1->blkno = blkno;
 598
 599	/*
 600	 * Set up the new root node.
 601	 */
 602	error = xfs_da3_node_create(args,
 603		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
 604		level + 1, &bp, args->whichfork);
 605	if (error)
 606		return error;
 607
 608	node = bp->b_addr;
 609	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 610	btree = dp->d_ops->node_tree_p(node);
 611	btree[0].hashval = cpu_to_be32(blk1->hashval);
 612	btree[0].before = cpu_to_be32(blk1->blkno);
 613	btree[1].hashval = cpu_to_be32(blk2->hashval);
 614	btree[1].before = cpu_to_be32(blk2->blkno);
 615	nodehdr.count = 2;
 616	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 617
 618#ifdef DEBUG
 619	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 620	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 621		ASSERT(blk1->blkno >= args->geo->leafblk &&
 622		       blk1->blkno < args->geo->freeblk);
 623		ASSERT(blk2->blkno >= args->geo->leafblk &&
 624		       blk2->blkno < args->geo->freeblk);
 625	}
 626#endif
 627
 628	/* Header is already logged by xfs_da_node_create */
 629	xfs_trans_log_buf(tp, bp,
 630		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
 631
 632	return 0;
 633}
 634
 635/*
 636 * Split the node, rebalance, then add the new entry.
 637 */
 638STATIC int						/* error */
 639xfs_da3_node_split(
 640	struct xfs_da_state	*state,
 641	struct xfs_da_state_blk	*oldblk,
 642	struct xfs_da_state_blk	*newblk,
 643	struct xfs_da_state_blk	*addblk,
 644	int			treelevel,
 645	int			*result)
 646{
 647	struct xfs_da_intnode	*node;
 648	struct xfs_da3_icnode_hdr nodehdr;
 649	xfs_dablk_t		blkno;
 650	int			newcount;
 651	int			error;
 652	int			useextra;
 653	struct xfs_inode	*dp = state->args->dp;
 654
 655	trace_xfs_da_node_split(state->args);
 656
 657	node = oldblk->bp->b_addr;
 658	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 659
 660	/*
 661	 * With V2 dirs the extra block is data or freespace.
 662	 */
 663	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
 664	newcount = 1 + useextra;
 665	/*
 666	 * Do we have to split the node?
 667	 */
 668	if (nodehdr.count + newcount > state->args->geo->node_ents) {
 669		/*
 670		 * Allocate a new node, add to the doubly linked chain of
 671		 * nodes, then move some of our excess entries into it.
 672		 */
 673		error = xfs_da_grow_inode(state->args, &blkno);
 674		if (error)
 675			return error;	/* GROT: dir is inconsistent */
 676
 677		error = xfs_da3_node_create(state->args, blkno, treelevel,
 678					   &newblk->bp, state->args->whichfork);
 679		if (error)
 680			return error;	/* GROT: dir is inconsistent */
 681		newblk->blkno = blkno;
 682		newblk->magic = XFS_DA_NODE_MAGIC;
 683		xfs_da3_node_rebalance(state, oldblk, newblk);
 684		error = xfs_da3_blk_link(state, oldblk, newblk);
 685		if (error)
 686			return error;
 687		*result = 1;
 688	} else {
 689		*result = 0;
 690	}
 691
 692	/*
 693	 * Insert the new entry(s) into the correct block
 694	 * (updating last hashval in the process).
 695	 *
 696	 * xfs_da3_node_add() inserts BEFORE the given index,
 697	 * and as a result of using node_lookup_int() we always
 698	 * point to a valid entry (not after one), but a split
 699	 * operation always results in a new block whose hashvals
 700	 * FOLLOW the current block.
 701	 *
 702	 * If we had double-split op below us, then add the extra block too.
 703	 */
 704	node = oldblk->bp->b_addr;
 705	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 706	if (oldblk->index <= nodehdr.count) {
 707		oldblk->index++;
 708		xfs_da3_node_add(state, oldblk, addblk);
 709		if (useextra) {
 710			if (state->extraafter)
 711				oldblk->index++;
 712			xfs_da3_node_add(state, oldblk, &state->extrablk);
 713			state->extravalid = 0;
 714		}
 715	} else {
 716		newblk->index++;
 717		xfs_da3_node_add(state, newblk, addblk);
 718		if (useextra) {
 719			if (state->extraafter)
 720				newblk->index++;
 721			xfs_da3_node_add(state, newblk, &state->extrablk);
 722			state->extravalid = 0;
 723		}
 724	}
 725
 726	return 0;
 727}
 728
 729/*
 730 * Balance the btree elements between two intermediate nodes,
 731 * usually one full and one empty.
 732 *
 733 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 734 */
 735STATIC void
 736xfs_da3_node_rebalance(
 737	struct xfs_da_state	*state,
 738	struct xfs_da_state_blk	*blk1,
 739	struct xfs_da_state_blk	*blk2)
 740{
 741	struct xfs_da_intnode	*node1;
 742	struct xfs_da_intnode	*node2;
 743	struct xfs_da_intnode	*tmpnode;
 744	struct xfs_da_node_entry *btree1;
 745	struct xfs_da_node_entry *btree2;
 746	struct xfs_da_node_entry *btree_s;
 747	struct xfs_da_node_entry *btree_d;
 748	struct xfs_da3_icnode_hdr nodehdr1;
 749	struct xfs_da3_icnode_hdr nodehdr2;
 750	struct xfs_trans	*tp;
 751	int			count;
 752	int			tmp;
 753	int			swap = 0;
 754	struct xfs_inode	*dp = state->args->dp;
 755
 756	trace_xfs_da_node_rebalance(state->args);
 757
 758	node1 = blk1->bp->b_addr;
 759	node2 = blk2->bp->b_addr;
 760	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 761	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 762	btree1 = dp->d_ops->node_tree_p(node1);
 763	btree2 = dp->d_ops->node_tree_p(node2);
 764
 765	/*
 766	 * Figure out how many entries need to move, and in which direction.
 767	 * Swap the nodes around if that makes it simpler.
 768	 */
 769	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
 770	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
 771	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
 772			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
 773		tmpnode = node1;
 774		node1 = node2;
 775		node2 = tmpnode;
 776		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 777		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 778		btree1 = dp->d_ops->node_tree_p(node1);
 779		btree2 = dp->d_ops->node_tree_p(node2);
 780		swap = 1;
 781	}
 782
 783	count = (nodehdr1.count - nodehdr2.count) / 2;
 784	if (count == 0)
 785		return;
 786	tp = state->args->trans;
 787	/*
 788	 * Two cases: high-to-low and low-to-high.
 789	 */
 790	if (count > 0) {
 791		/*
 792		 * Move elements in node2 up to make a hole.
 793		 */
 794		tmp = nodehdr2.count;
 795		if (tmp > 0) {
 796			tmp *= (uint)sizeof(xfs_da_node_entry_t);
 797			btree_s = &btree2[0];
 798			btree_d = &btree2[count];
 799			memmove(btree_d, btree_s, tmp);
 800		}
 801
 802		/*
 803		 * Move the req'd B-tree elements from high in node1 to
 804		 * low in node2.
 805		 */
 806		nodehdr2.count += count;
 807		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 808		btree_s = &btree1[nodehdr1.count - count];
 809		btree_d = &btree2[0];
 810		memcpy(btree_d, btree_s, tmp);
 811		nodehdr1.count -= count;
 812	} else {
 813		/*
 814		 * Move the req'd B-tree elements from low in node2 to
 815		 * high in node1.
 816		 */
 817		count = -count;
 818		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 819		btree_s = &btree2[0];
 820		btree_d = &btree1[nodehdr1.count];
 821		memcpy(btree_d, btree_s, tmp);
 822		nodehdr1.count += count;
 823
 824		xfs_trans_log_buf(tp, blk1->bp,
 825			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 826
 827		/*
 828		 * Move elements in node2 down to fill the hole.
 829		 */
 830		tmp  = nodehdr2.count - count;
 831		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 832		btree_s = &btree2[count];
 833		btree_d = &btree2[0];
 834		memmove(btree_d, btree_s, tmp);
 835		nodehdr2.count -= count;
 836	}
 837
 838	/*
 839	 * Log header of node 1 and all current bits of node 2.
 840	 */
 841	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
 842	xfs_trans_log_buf(tp, blk1->bp,
 843		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
 844
 845	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
 846	xfs_trans_log_buf(tp, blk2->bp,
 847		XFS_DA_LOGRANGE(node2, &node2->hdr,
 848				dp->d_ops->node_hdr_size +
 849				(sizeof(btree2[0]) * nodehdr2.count)));
 850
 851	/*
 852	 * Record the last hashval from each block for upward propagation.
 853	 * (note: don't use the swapped node pointers)
 854	 */
 855	if (swap) {
 856		node1 = blk1->bp->b_addr;
 857		node2 = blk2->bp->b_addr;
 858		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 859		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 860		btree1 = dp->d_ops->node_tree_p(node1);
 861		btree2 = dp->d_ops->node_tree_p(node2);
 862	}
 863	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
 864	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
 865
 866	/*
 867	 * Adjust the expected index for insertion.
 868	 */
 869	if (blk1->index >= nodehdr1.count) {
 870		blk2->index = blk1->index - nodehdr1.count;
 871		blk1->index = nodehdr1.count + 1;	/* make it invalid */
 872	}
 873}
 874
 875/*
 876 * Add a new entry to an intermediate node.
 877 */
 878STATIC void
 879xfs_da3_node_add(
 880	struct xfs_da_state	*state,
 881	struct xfs_da_state_blk	*oldblk,
 882	struct xfs_da_state_blk	*newblk)
 883{
 884	struct xfs_da_intnode	*node;
 885	struct xfs_da3_icnode_hdr nodehdr;
 886	struct xfs_da_node_entry *btree;
 887	int			tmp;
 888	struct xfs_inode	*dp = state->args->dp;
 889
 890	trace_xfs_da_node_add(state->args);
 891
 892	node = oldblk->bp->b_addr;
 893	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 894	btree = dp->d_ops->node_tree_p(node);
 895
 896	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
 897	ASSERT(newblk->blkno != 0);
 898	if (state->args->whichfork == XFS_DATA_FORK)
 899		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
 900		       newblk->blkno < state->args->geo->freeblk);
 901
 902	/*
 903	 * We may need to make some room before we insert the new node.
 904	 */
 905	tmp = 0;
 906	if (oldblk->index < nodehdr.count) {
 907		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
 908		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
 909	}
 910	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
 911	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
 912	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 913		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
 914				tmp + sizeof(*btree)));
 915
 916	nodehdr.count += 1;
 917	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 918	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 919		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 920
 921	/*
 922	 * Copy the last hash value from the oldblk to propagate upwards.
 923	 */
 924	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
 925}
 926
 927/*========================================================================
 928 * Routines used for shrinking the Btree.
 929 *========================================================================*/
 930
 931/*
 932 * Deallocate an empty leaf node, remove it from its parent,
 933 * possibly deallocating that block, etc...
 934 */
 935int
 936xfs_da3_join(
 937	struct xfs_da_state	*state)
 938{
 939	struct xfs_da_state_blk	*drop_blk;
 940	struct xfs_da_state_blk	*save_blk;
 941	int			action = 0;
 942	int			error;
 943
 944	trace_xfs_da_join(state->args);
 945
 946	drop_blk = &state->path.blk[ state->path.active-1 ];
 947	save_blk = &state->altpath.blk[ state->path.active-1 ];
 948	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
 949	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
 950	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
 951
 952	/*
 953	 * Walk back up the tree joining/deallocating as necessary.
 954	 * When we stop dropping blocks, break out.
 955	 */
 956	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
 957		 state->path.active--) {
 958		/*
 959		 * See if we can combine the block with a neighbor.
 960		 *   (action == 0) => no options, just leave
 961		 *   (action == 1) => coalesce, then unlink
 962		 *   (action == 2) => block empty, unlink it
 963		 */
 964		switch (drop_blk->magic) {
 965		case XFS_ATTR_LEAF_MAGIC:
 966			error = xfs_attr3_leaf_toosmall(state, &action);
 967			if (error)
 968				return error;
 969			if (action == 0)
 970				return 0;
 971			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
 972			break;
 973		case XFS_DIR2_LEAFN_MAGIC:
 974			error = xfs_dir2_leafn_toosmall(state, &action);
 975			if (error)
 976				return error;
 977			if (action == 0)
 978				return 0;
 979			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
 980			break;
 981		case XFS_DA_NODE_MAGIC:
 982			/*
 983			 * Remove the offending node, fixup hashvals,
 984			 * check for a toosmall neighbor.
 985			 */
 986			xfs_da3_node_remove(state, drop_blk);
 987			xfs_da3_fixhashpath(state, &state->path);
 988			error = xfs_da3_node_toosmall(state, &action);
 989			if (error)
 990				return error;
 991			if (action == 0)
 992				return 0;
 993			xfs_da3_node_unbalance(state, drop_blk, save_blk);
 994			break;
 995		}
 996		xfs_da3_fixhashpath(state, &state->altpath);
 997		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
 998		xfs_da_state_kill_altpath(state);
 999		if (error)
1000			return error;
1001		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1002							 drop_blk->bp);
1003		drop_blk->bp = NULL;
1004		if (error)
1005			return error;
1006	}
1007	/*
1008	 * We joined all the way to the top.  If it turns out that
1009	 * we only have one entry in the root, make the child block
1010	 * the new root.
1011	 */
1012	xfs_da3_node_remove(state, drop_blk);
1013	xfs_da3_fixhashpath(state, &state->path);
1014	error = xfs_da3_root_join(state, &state->path.blk[0]);
1015	return error;
1016}
1017
1018#ifdef	DEBUG
1019static void
1020xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1021{
1022	__be16	magic = blkinfo->magic;
1023
1024	if (level == 1) {
1025		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1026		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1027		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1028		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1029	} else {
1030		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1031		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1032	}
1033	ASSERT(!blkinfo->forw);
1034	ASSERT(!blkinfo->back);
1035}
1036#else	/* !DEBUG */
1037#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1038#endif	/* !DEBUG */
1039
1040/*
1041 * We have only one entry in the root.  Copy the only remaining child of
1042 * the old root to block 0 as the new root node.
1043 */
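/*
 * Note that the root of a da btree lives at a fixed block (block 0 of the
 * attribute fork, args->geo->leafblk for directories), so collapsing a
 * level means copying the lone child's contents over the root block and
 * freeing the child, rather than re-pointing the root at the child.
 */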
1044STATIC int
1045xfs_da3_root_join(
1046	struct xfs_da_state	*state,
1047	struct xfs_da_state_blk	*root_blk)
1048{
1049	struct xfs_da_intnode	*oldroot;
1050	struct xfs_da_args	*args;
1051	xfs_dablk_t		child;
1052	struct xfs_buf		*bp;
1053	struct xfs_da3_icnode_hdr oldroothdr;
1054	struct xfs_da_node_entry *btree;
1055	int			error;
1056	struct xfs_inode	*dp = state->args->dp;
1057
1058	trace_xfs_da_root_join(state->args);
1059
1060	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1061
1062	args = state->args;
1063	oldroot = root_blk->bp->b_addr;
1064	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
1065	ASSERT(oldroothdr.forw == 0);
1066	ASSERT(oldroothdr.back == 0);
1067
1068	/*
1069	 * If the root has more than one child, then don't do anything.
1070	 */
1071	if (oldroothdr.count > 1)
1072		return 0;
1073
1074	/*
1075	 * Read in the (only) child block, then copy those bytes into
1076	 * the root block's buffer and free the original child block.
1077	 */
1078	btree = dp->d_ops->node_tree_p(oldroot);
1079	child = be32_to_cpu(btree[0].before);
1080	ASSERT(child != 0);
1081	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1082					     args->whichfork);
1083	if (error)
1084		return error;
1085	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1086
1087	/*
1088	 * This could be copying a leaf back into the root block in the case of
1089	 * there only being a single leaf block left in the tree. Hence we have
1090	 * to update the b_ops pointer as well to match the buffer type change
1091	 * that could occur. For dir3 blocks we also need to update the block
1092	 * number in the buffer header.
1093	 */
1094	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1095	root_blk->bp->b_ops = bp->b_ops;
1096	xfs_trans_buf_copy_type(root_blk->bp, bp);
1097	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1098		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1099		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1100	}
1101	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1102			  args->geo->blksize - 1);
1103	error = xfs_da_shrink_inode(args, child, bp);
1104	return error;
1105}
1106
1107/*
1108 * Check a node block and its neighbors to see if the block should be
1109 * collapsed into one or the other neighbor.  Always keep the block
1110 * with the smaller block number.
 1111 * If the current block is over 50% full, don't try to join it: set *action to 0.
 1112 * If the block is empty, fill in the state structure and set *action to 2.
 1113 * If it can be collapsed, fill in the state structure and set *action to 1.
 1114 * If nothing can be done, set *action to 0.  Returns 0 unless a read error occurs.
1115 */
1116STATIC int
1117xfs_da3_node_toosmall(
1118	struct xfs_da_state	*state,
1119	int			*action)
1120{
1121	struct xfs_da_intnode	*node;
1122	struct xfs_da_state_blk	*blk;
1123	struct xfs_da_blkinfo	*info;
1124	xfs_dablk_t		blkno;
1125	struct xfs_buf		*bp;
1126	struct xfs_da3_icnode_hdr nodehdr;
1127	int			count;
1128	int			forward;
1129	int			error;
1130	int			retval;
1131	int			i;
1132	struct xfs_inode	*dp = state->args->dp;
1133
1134	trace_xfs_da_node_toosmall(state->args);
1135
1136	/*
1137	 * Check for the degenerate case of the block being over 50% full.
1138	 * If so, it's not worth even looking to see if we might be able
1139	 * to coalesce with a sibling.
1140	 */
1141	blk = &state->path.blk[ state->path.active-1 ];
1142	info = blk->bp->b_addr;
1143	node = (xfs_da_intnode_t *)info;
1144	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1145	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1146		*action = 0;	/* blk over 50%, don't try to join */
 1147		return 0;
1148	}
1149
1150	/*
1151	 * Check for the degenerate case of the block being empty.
1152	 * If the block is empty, we'll simply delete it, no need to
1153	 * coalesce it with a sibling block.  We choose (arbitrarily)
1154	 * to merge with the forward block unless it is NULL.
1155	 */
1156	if (nodehdr.count == 0) {
1157		/*
1158		 * Make altpath point to the block we want to keep and
1159		 * path point to the block we want to drop (this one).
1160		 */
1161		forward = (info->forw != 0);
1162		memcpy(&state->altpath, &state->path, sizeof(state->path));
1163		error = xfs_da3_path_shift(state, &state->altpath, forward,
1164						 0, &retval);
1165		if (error)
1166			return error;
1167		if (retval) {
1168			*action = 0;
1169		} else {
1170			*action = 2;
1171		}
1172		return 0;
1173	}
1174
1175	/*
1176	 * Examine each sibling block to see if we can coalesce with
1177	 * at least 25% free space to spare.  We need to figure out
1178	 * whether to merge with the forward or the backward block.
1179	 * We prefer coalescing with the lower numbered sibling so as
1180	 * to shrink a directory over time.
1181	 */
1182	count  = state->args->geo->node_ents;
1183	count -= state->args->geo->node_ents >> 2;
1184	count -= nodehdr.count;
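	/*
	 * Illustration (with a made-up geometry): if node_ents is 64, the
	 * budget starts at 64 - 16 = 48; with 20 entries in this node, a
	 * sibling holding at most 28 can be merged, leaving at least 16
	 * slots (25%) free in the combined block.
	 */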
1185
1186	/* start with smaller blk num */
1187	forward = nodehdr.forw < nodehdr.back;
1188	for (i = 0; i < 2; forward = !forward, i++) {
1189		struct xfs_da3_icnode_hdr thdr;
1190		if (forward)
1191			blkno = nodehdr.forw;
1192		else
1193			blkno = nodehdr.back;
1194		if (blkno == 0)
1195			continue;
1196		error = xfs_da3_node_read(state->args->trans, dp,
1197					blkno, -1, &bp, state->args->whichfork);
1198		if (error)
1199			return error;
1200
1201		node = bp->b_addr;
1202		dp->d_ops->node_hdr_from_disk(&thdr, node);
1203		xfs_trans_brelse(state->args->trans, bp);
1204
1205		if (count - thdr.count >= 0)
1206			break;	/* fits with at least 25% to spare */
1207	}
1208	if (i >= 2) {
1209		*action = 0;
1210		return 0;
1211	}
1212
1213	/*
1214	 * Make altpath point to the block we want to keep (the lower
1215	 * numbered block) and path point to the block we want to drop.
1216	 */
1217	memcpy(&state->altpath, &state->path, sizeof(state->path));
1218	if (blkno < blk->blkno) {
1219		error = xfs_da3_path_shift(state, &state->altpath, forward,
1220						 0, &retval);
1221	} else {
1222		error = xfs_da3_path_shift(state, &state->path, forward,
1223						 0, &retval);
1224	}
1225	if (error)
1226		return error;
1227	if (retval) {
1228		*action = 0;
1229		return 0;
1230	}
1231	*action = 1;
1232	return 0;
1233}
1234
1235/*
1236 * Pick up the last hashvalue from an intermediate node.
1237 */
1238STATIC uint
1239xfs_da3_node_lasthash(
1240	struct xfs_inode	*dp,
1241	struct xfs_buf		*bp,
1242	int			*count)
1243{
1244	struct xfs_da_intnode	 *node;
1245	struct xfs_da_node_entry *btree;
1246	struct xfs_da3_icnode_hdr nodehdr;
1247
1248	node = bp->b_addr;
1249	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1250	if (count)
1251		*count = nodehdr.count;
1252	if (!nodehdr.count)
1253		return 0;
1254	btree = dp->d_ops->node_tree_p(node);
1255	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1256}
1257
1258/*
 1259 * Walk back up the tree adjusting hash values as necessary;
 1260 * return once we stop making changes.
1261 */
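/*
 * (The bottom block of the path supplies its new last hashval; each
 * ancestor's entry for that child is rewritten with the value, stopping
 * at the first level where the stored hashval already matches.)
 */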
1262void
1263xfs_da3_fixhashpath(
1264	struct xfs_da_state	*state,
1265	struct xfs_da_state_path *path)
1266{
1267	struct xfs_da_state_blk	*blk;
1268	struct xfs_da_intnode	*node;
1269	struct xfs_da_node_entry *btree;
1270	xfs_dahash_t		lasthash=0;
1271	int			level;
1272	int			count;
1273	struct xfs_inode	*dp = state->args->dp;
1274
1275	trace_xfs_da_fixhashpath(state->args);
1276
1277	level = path->active-1;
1278	blk = &path->blk[ level ];
1279	switch (blk->magic) {
1280	case XFS_ATTR_LEAF_MAGIC:
1281		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1282		if (count == 0)
1283			return;
1284		break;
1285	case XFS_DIR2_LEAFN_MAGIC:
1286		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
1287		if (count == 0)
1288			return;
1289		break;
1290	case XFS_DA_NODE_MAGIC:
1291		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1292		if (count == 0)
1293			return;
1294		break;
1295	}
1296	for (blk--, level--; level >= 0; blk--, level--) {
1297		struct xfs_da3_icnode_hdr nodehdr;
1298
1299		node = blk->bp->b_addr;
1300		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1301		btree = dp->d_ops->node_tree_p(node);
1302		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1303			break;
1304		blk->hashval = lasthash;
1305		btree[blk->index].hashval = cpu_to_be32(lasthash);
1306		xfs_trans_log_buf(state->args->trans, blk->bp,
1307				  XFS_DA_LOGRANGE(node, &btree[blk->index],
1308						  sizeof(*btree)));
1309
1310		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1311	}
1312}
1313
1314/*
1315 * Remove an entry from an intermediate node.
1316 */
1317STATIC void
1318xfs_da3_node_remove(
1319	struct xfs_da_state	*state,
1320	struct xfs_da_state_blk	*drop_blk)
1321{
1322	struct xfs_da_intnode	*node;
1323	struct xfs_da3_icnode_hdr nodehdr;
1324	struct xfs_da_node_entry *btree;
1325	int			index;
1326	int			tmp;
1327	struct xfs_inode	*dp = state->args->dp;
1328
1329	trace_xfs_da_node_remove(state->args);
1330
1331	node = drop_blk->bp->b_addr;
1332	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1333	ASSERT(drop_blk->index < nodehdr.count);
1334	ASSERT(drop_blk->index >= 0);
1335
1336	/*
1337	 * Copy over the offending entry, or just zero it out.
1338	 */
1339	index = drop_blk->index;
1340	btree = dp->d_ops->node_tree_p(node);
1341	if (index < nodehdr.count - 1) {
1342		tmp  = nodehdr.count - index - 1;
1343		tmp *= (uint)sizeof(xfs_da_node_entry_t);
1344		memmove(&btree[index], &btree[index + 1], tmp);
1345		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1346		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
1347		index = nodehdr.count - 1;
1348	}
1349	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1350	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1351	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1352	nodehdr.count -= 1;
1353	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
1354	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1355	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
1356
1357	/*
1358	 * Copy the last hash value from the block to propagate upwards.
1359	 */
1360	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1361}
1362
1363/*
 1364 * Unbalance the elements between two intermediate nodes:
 1365 * move all Btree elements from one node into the other.
1366 */
1367STATIC void
1368xfs_da3_node_unbalance(
1369	struct xfs_da_state	*state,
1370	struct xfs_da_state_blk	*drop_blk,
1371	struct xfs_da_state_blk	*save_blk)
1372{
1373	struct xfs_da_intnode	*drop_node;
1374	struct xfs_da_intnode	*save_node;
1375	struct xfs_da_node_entry *drop_btree;
1376	struct xfs_da_node_entry *save_btree;
1377	struct xfs_da3_icnode_hdr drop_hdr;
1378	struct xfs_da3_icnode_hdr save_hdr;
1379	struct xfs_trans	*tp;
1380	int			sindex;
1381	int			tmp;
1382	struct xfs_inode	*dp = state->args->dp;
1383
1384	trace_xfs_da_node_unbalance(state->args);
1385
1386	drop_node = drop_blk->bp->b_addr;
1387	save_node = save_blk->bp->b_addr;
1388	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
1389	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
1390	drop_btree = dp->d_ops->node_tree_p(drop_node);
1391	save_btree = dp->d_ops->node_tree_p(save_node);
1392	tp = state->args->trans;
1393
1394	/*
1395	 * If the dying block has lower hashvals, then move all the
1396	 * elements in the remaining block up to make a hole.
1397	 */
1398	if ((be32_to_cpu(drop_btree[0].hashval) <
1399			be32_to_cpu(save_btree[0].hashval)) ||
1400	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1401			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
 1402		/* shift save's existing entries up to open a hole at the front */
1403		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1404		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1405
1406		sindex = 0;
1407		xfs_trans_log_buf(tp, save_blk->bp,
1408			XFS_DA_LOGRANGE(save_node, &save_btree[0],
1409				(save_hdr.count + drop_hdr.count) *
1410						sizeof(xfs_da_node_entry_t)));
1411	} else {
1412		sindex = save_hdr.count;
1413		xfs_trans_log_buf(tp, save_blk->bp,
1414			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1415				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1416	}
1417
1418	/*
1419	 * Move all the B-tree elements from drop_blk to save_blk.
1420	 */
1421	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1422	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1423	save_hdr.count += drop_hdr.count;
1424
1425	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
1426	xfs_trans_log_buf(tp, save_blk->bp,
1427		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1428				dp->d_ops->node_hdr_size));
1429
1430	/*
1431	 * Save the last hashval in the remaining block for upward propagation.
1432	 */
1433	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1434}
1435
1436/*========================================================================
1437 * Routines used for finding things in the Btree.
1438 *========================================================================*/
1439
1440/*
1441 * Walk down the Btree looking for a particular filename, filling
1442 * in the state structure as we go.
1443 *
1444 * We will set the state structure to point to each of the elements
1445 * in each of the nodes where either the hashval is or should be.
1446 *
 1447 * We support duplicate hashvals, so for each entry in the current
1448 * node that could contain the desired hashval, descend.  This is a
1449 * pruned depth-first tree search.
1450 */
1451int							/* error */
1452xfs_da3_node_lookup_int(
1453	struct xfs_da_state	*state,
1454	int			*result)
1455{
1456	struct xfs_da_state_blk	*blk;
1457	struct xfs_da_blkinfo	*curr;
1458	struct xfs_da_intnode	*node;
1459	struct xfs_da_node_entry *btree;
1460	struct xfs_da3_icnode_hdr nodehdr;
1461	struct xfs_da_args	*args;
1462	xfs_dablk_t		blkno;
1463	xfs_dahash_t		hashval;
1464	xfs_dahash_t		btreehashval;
1465	int			probe;
1466	int			span;
1467	int			max;
1468	int			error;
1469	int			retval;
1470	struct xfs_inode	*dp = state->args->dp;
1471
1472	args = state->args;
1473
1474	/*
1475	 * Descend thru the B-tree searching each level for the right
1476	 * node to use, until the right hashval is found.
1477	 */
1478	blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
1479	for (blk = &state->path.blk[0], state->path.active = 1;
1480			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1481			 blk++, state->path.active++) {
1482		/*
1483		 * Read the next node down in the tree.
1484		 */
1485		blk->blkno = blkno;
1486		error = xfs_da3_node_read(args->trans, args->dp, blkno,
1487					-1, &blk->bp, args->whichfork);
1488		if (error) {
1489			blk->blkno = 0;
1490			state->path.active--;
1491			return error;
1492		}
1493		curr = blk->bp->b_addr;
1494		blk->magic = be16_to_cpu(curr->magic);
1495
1496		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1497		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1498			blk->magic = XFS_ATTR_LEAF_MAGIC;
1499			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1500			break;
1501		}
1502
1503		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1504		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1505			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1506			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1507							       blk->bp, NULL);
1508			break;
1509		}
1510
1511		blk->magic = XFS_DA_NODE_MAGIC;
1512
1513
1514		/*
1515		 * Search an intermediate node for a match.
1516		 */
1517		node = blk->bp->b_addr;
1518		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1519		btree = dp->d_ops->node_tree_p(node);
1520
1521		max = nodehdr.count;
1522		blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1523
1524		/*
1525		 * Binary search.  (note: small blocks will skip loop)
1526		 */
1527		probe = span = max / 2;
1528		hashval = args->hashval;
1529		while (span > 4) {
1530			span /= 2;
1531			btreehashval = be32_to_cpu(btree[probe].hashval);
1532			if (btreehashval < hashval)
1533				probe += span;
1534			else if (btreehashval > hashval)
1535				probe -= span;
1536			else
1537				break;
1538		}
1539		ASSERT((probe >= 0) && (probe < max));
1540		ASSERT((span <= 4) ||
1541			(be32_to_cpu(btree[probe].hashval) == hashval));
1542
1543		/*
 1544		 * Since we may have duplicate hashvals, find the first
1545		 * matching hashval in the node.
1546		 */
1547		while (probe > 0 &&
1548		       be32_to_cpu(btree[probe].hashval) >= hashval) {
1549			probe--;
1550		}
1551		while (probe < max &&
1552		       be32_to_cpu(btree[probe].hashval) < hashval) {
1553			probe++;
1554		}
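		/*
		 * probe now indexes the first entry whose hashval is >=
		 * args->hashval, or max if every entry hashes lower.
		 */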
1555
1556		/*
1557		 * Pick the right block to descend on.
1558		 */
1559		if (probe == max) {
1560			blk->index = max - 1;
1561			blkno = be32_to_cpu(btree[max - 1].before);
1562		} else {
1563			blk->index = probe;
1564			blkno = be32_to_cpu(btree[probe].before);
1565		}
1566	}
1567
1568	/*
1569	 * A leaf block that ends in the hashval that we are interested in
1570	 * (final hashval == search hashval) means that the next block may
1571	 * contain more entries with the same hashval, shift upward to the
1572	 * next leaf and keep searching.
1573	 */
1574	for (;;) {
1575		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1576			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1577							&blk->index, state);
1578		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1579			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1580			blk->index = args->index;
1581			args->blkno = blk->blkno;
1582		} else {
1583			ASSERT(0);
1584			return -EFSCORRUPTED;
1585		}
1586		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1587		    (blk->hashval == args->hashval)) {
1588			error = xfs_da3_path_shift(state, &state->path, 1, 1,
1589							 &retval);
1590			if (error)
1591				return error;
1592			if (retval == 0) {
1593				continue;
1594			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1595				/* path_shift() gives ENOENT */
1596				retval = -ENOATTR;
1597			}
1598		}
1599		break;
1600	}
1601	*result = retval;
1602	return 0;
1603}
1604
1605/*========================================================================
1606 * Utility routines.
1607 *========================================================================*/
1608
1609/*
 1610 * Compare two intermediate nodes: return 1 if node2's hashvals sort before node1's.
1611 */
1612STATIC int
1613xfs_da3_node_order(
1614	struct xfs_inode *dp,
1615	struct xfs_buf	*node1_bp,
1616	struct xfs_buf	*node2_bp)
1617{
1618	struct xfs_da_intnode	*node1;
1619	struct xfs_da_intnode	*node2;
1620	struct xfs_da_node_entry *btree1;
1621	struct xfs_da_node_entry *btree2;
1622	struct xfs_da3_icnode_hdr node1hdr;
1623	struct xfs_da3_icnode_hdr node2hdr;
1624
1625	node1 = node1_bp->b_addr;
1626	node2 = node2_bp->b_addr;
1627	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
1628	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
1629	btree1 = dp->d_ops->node_tree_p(node1);
1630	btree2 = dp->d_ops->node_tree_p(node2);
1631
1632	if (node1hdr.count > 0 && node2hdr.count > 0 &&
1633	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1634	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1635	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1636		return 1;
1637	}
1638	return 0;
1639}
1640
1641/*
1642 * Link a new block into a doubly linked list of blocks (of whatever type).
1643 */
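/*
 * Illustration of the "link before" case (the "link after" case is the
 * mirror image):
 *
 *	before:  ... <-> B <-> old <-> ...
 *	after:   ... <-> B <-> new <-> old <-> ...
 *
 * new->forw points at old, new->back takes over old's back pointer, and
 * both neighbours are updated to point at the new block.
 */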
1644int							/* error */
1645xfs_da3_blk_link(
1646	struct xfs_da_state	*state,
1647	struct xfs_da_state_blk	*old_blk,
1648	struct xfs_da_state_blk	*new_blk)
1649{
1650	struct xfs_da_blkinfo	*old_info;
1651	struct xfs_da_blkinfo	*new_info;
1652	struct xfs_da_blkinfo	*tmp_info;
1653	struct xfs_da_args	*args;
1654	struct xfs_buf		*bp;
1655	int			before = 0;
1656	int			error;
1657	struct xfs_inode	*dp = state->args->dp;
1658
1659	/*
1660	 * Set up environment.
1661	 */
1662	args = state->args;
1663	ASSERT(args != NULL);
1664	old_info = old_blk->bp->b_addr;
1665	new_info = new_blk->bp->b_addr;
1666	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1667	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1668	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1669
1670	switch (old_blk->magic) {
1671	case XFS_ATTR_LEAF_MAGIC:
1672		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1673		break;
1674	case XFS_DIR2_LEAFN_MAGIC:
1675		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1676		break;
1677	case XFS_DA_NODE_MAGIC:
1678		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1679		break;
1680	}
1681
1682	/*
1683	 * Link blocks in appropriate order.
1684	 */
1685	if (before) {
1686		/*
1687		 * Link new block in before existing block.
1688		 */
1689		trace_xfs_da_link_before(args);
1690		new_info->forw = cpu_to_be32(old_blk->blkno);
1691		new_info->back = old_info->back;
1692		if (old_info->back) {
1693			error = xfs_da3_node_read(args->trans, dp,
1694						be32_to_cpu(old_info->back),
1695						-1, &bp, args->whichfork);
1696			if (error)
1697				return error;
1698			ASSERT(bp != NULL);
1699			tmp_info = bp->b_addr;
1700			ASSERT(tmp_info->magic == old_info->magic);
1701			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1702			tmp_info->forw = cpu_to_be32(new_blk->blkno);
1703			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1704		}
1705		old_info->back = cpu_to_be32(new_blk->blkno);
1706	} else {
1707		/*
1708		 * Link new block in after existing block.
1709		 */
1710		trace_xfs_da_link_after(args);
1711		new_info->forw = old_info->forw;
1712		new_info->back = cpu_to_be32(old_blk->blkno);
1713		if (old_info->forw) {
1714			error = xfs_da3_node_read(args->trans, dp,
1715						be32_to_cpu(old_info->forw),
1716						-1, &bp, args->whichfork);
1717			if (error)
1718				return error;
1719			ASSERT(bp != NULL);
1720			tmp_info = bp->b_addr;
1721			ASSERT(tmp_info->magic == old_info->magic);
1722			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1723			tmp_info->back = cpu_to_be32(new_blk->blkno);
1724			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1725		}
1726		old_info->forw = cpu_to_be32(new_blk->blkno);
1727	}
1728
1729	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1730	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1731	return 0;
1732}
1733
1734/*
1735 * Unlink a block from a doubly linked list of blocks.
1736 */
1737STATIC int						/* error */
1738xfs_da3_blk_unlink(
1739	struct xfs_da_state	*state,
1740	struct xfs_da_state_blk	*drop_blk,
1741	struct xfs_da_state_blk	*save_blk)
1742{
1743	struct xfs_da_blkinfo	*drop_info;
1744	struct xfs_da_blkinfo	*save_info;
1745	struct xfs_da_blkinfo	*tmp_info;
1746	struct xfs_da_args	*args;
1747	struct xfs_buf		*bp;
1748	int			error;
1749
1750	/*
1751	 * Set up environment.
1752	 */
1753	args = state->args;
1754	ASSERT(args != NULL);
1755	save_info = save_blk->bp->b_addr;
1756	drop_info = drop_blk->bp->b_addr;
1757	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1758	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1759	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1760	ASSERT(save_blk->magic == drop_blk->magic);
1761	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1762	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
1763	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1764	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
1765
1766	/*
1767	 * Unlink the leaf block from the doubly linked chain of leaves.
1768	 */
1769	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1770		trace_xfs_da_unlink_back(args);
1771		save_info->back = drop_info->back;
1772		if (drop_info->back) {
1773			error = xfs_da3_node_read(args->trans, args->dp,
1774						be32_to_cpu(drop_info->back),
1775						-1, &bp, args->whichfork);
1776			if (error)
1777				return error;
1778			ASSERT(bp != NULL);
1779			tmp_info = bp->b_addr;
1780			ASSERT(tmp_info->magic == save_info->magic);
1781			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1782			tmp_info->forw = cpu_to_be32(save_blk->blkno);
1783			xfs_trans_log_buf(args->trans, bp, 0,
1784						    sizeof(*tmp_info) - 1);
1785		}
1786	} else {
1787		trace_xfs_da_unlink_forward(args);
1788		save_info->forw = drop_info->forw;
1789		if (drop_info->forw) {
1790			error = xfs_da3_node_read(args->trans, args->dp,
1791						be32_to_cpu(drop_info->forw),
1792						-1, &bp, args->whichfork);
1793			if (error)
1794				return error;
1795			ASSERT(bp != NULL);
1796			tmp_info = bp->b_addr;
1797			ASSERT(tmp_info->magic == save_info->magic);
1798			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1799			tmp_info->back = cpu_to_be32(save_blk->blkno);
1800			xfs_trans_log_buf(args->trans, bp, 0,
1801						    sizeof(*tmp_info) - 1);
1802		}
1803	}
1804
1805	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1806	return 0;
1807}
1808
1809/*
1810 * Move a path "forward" or "!forward" one block at the current level.
1811 *
1812 * This routine will adjust a "path" to point to the next block
1813 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1814 * Btree, including updating pointers to the intermediate nodes between
1815 * the new bottom and the root.
1816 */
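/*
 * "forward" selects the direction of the shift, "release" causes the
 * buffers replaced along the path to be handed back to the transaction
 * via xfs_trans_brelse(), and *result is set to -ENOENT if the path was
 * already at the edge of the tree, 0 otherwise.
 */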
1817int							/* error */
1818xfs_da3_path_shift(
1819	struct xfs_da_state	*state,
1820	struct xfs_da_state_path *path,
1821	int			forward,
1822	int			release,
1823	int			*result)
1824{
1825	struct xfs_da_state_blk	*blk;
1826	struct xfs_da_blkinfo	*info;
1827	struct xfs_da_intnode	*node;
1828	struct xfs_da_args	*args;
1829	struct xfs_da_node_entry *btree;
1830	struct xfs_da3_icnode_hdr nodehdr;
1831	struct xfs_buf		*bp;
1832	xfs_dablk_t		blkno = 0;
1833	int			level;
1834	int			error;
1835	struct xfs_inode	*dp = state->args->dp;
1836
1837	trace_xfs_da_path_shift(state->args);
1838
1839	/*
1840	 * Roll up the Btree looking for the first block where our
1841	 * current index is not at the edge of the block.  Note that
1842	 * we skip the bottom layer because we want the sibling block.
1843	 */
1844	args = state->args;
1845	ASSERT(args != NULL);
1846	ASSERT(path != NULL);
1847	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1848	level = (path->active-1) - 1;	/* skip bottom layer in path */
1849	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1850		node = blk->bp->b_addr;
1851		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1852		btree = dp->d_ops->node_tree_p(node);
1853
1854		if (forward && (blk->index < nodehdr.count - 1)) {
1855			blk->index++;
1856			blkno = be32_to_cpu(btree[blk->index].before);
1857			break;
1858		} else if (!forward && (blk->index > 0)) {
1859			blk->index--;
1860			blkno = be32_to_cpu(btree[blk->index].before);
1861			break;
1862		}
1863	}
1864	if (level < 0) {
1865		*result = -ENOENT;	/* we're out of our tree */
1866		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1867		return 0;
1868	}
1869
1870	/*
1871	 * Roll down the edge of the subtree until we reach the
1872	 * same depth we were at originally.
1873	 */
1874	for (blk++, level++; level < path->active; blk++, level++) {
1875		/*
1876		 * Read the next child block into a local buffer.
1877		 */
1878		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
1879					  args->whichfork);
1880		if (error)
1881			return error;
1882
1883		/*
1884		 * Release the old block (if it's dirty, the trans doesn't
1885		 * actually let go) and swap the local buffer into the path
1886		 * structure. This ensures failure of the above read doesn't set
1887		 * a NULL buffer in an active slot in the path.
1888		 */
1889		if (release)
1890			xfs_trans_brelse(args->trans, blk->bp);
1891		blk->blkno = blkno;
1892		blk->bp = bp;
1893
1894		info = blk->bp->b_addr;
1895		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1896		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1897		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1898		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1899		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1900		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1901
1902
1903		/*
1904		 * Note: we flatten the magic number to a single type so we
1905		 * don't have to compare against crc/non-crc types elsewhere.
1906		 */
1907		switch (be16_to_cpu(info->magic)) {
1908		case XFS_DA_NODE_MAGIC:
1909		case XFS_DA3_NODE_MAGIC:
1910			blk->magic = XFS_DA_NODE_MAGIC;
1911			node = (xfs_da_intnode_t *)info;
1912			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1913			btree = dp->d_ops->node_tree_p(node);
1914			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1915			if (forward)
1916				blk->index = 0;
1917			else
1918				blk->index = nodehdr.count - 1;
1919			blkno = be32_to_cpu(btree[blk->index].before);
1920			break;
1921		case XFS_ATTR_LEAF_MAGIC:
1922		case XFS_ATTR3_LEAF_MAGIC:
1923			blk->magic = XFS_ATTR_LEAF_MAGIC;
1924			ASSERT(level == path->active-1);
1925			blk->index = 0;
1926			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1927			break;
1928		case XFS_DIR2_LEAFN_MAGIC:
1929		case XFS_DIR3_LEAFN_MAGIC:
1930			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1931			ASSERT(level == path->active-1);
1932			blk->index = 0;
1933			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1934							       blk->bp, NULL);
1935			break;
1936		default:
1937			ASSERT(0);
1938			break;
1939		}
1940	}
1941	*result = 0;
1942	return 0;
1943}
1944
1945
1946/*========================================================================
1947 * Utility routines.
1948 *========================================================================*/
1949
1950/*
1951 * Implement a simple hash on a character string.
1952 * Rotate the hash value by 7 bits, then XOR each character in.
1953 * This is implemented with some source-level loop unrolling.
1954 */
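/*
 * Worked example: hashing the two-byte name "ab" takes the namelen == 2
 * case below:
 *
 *	hash = ('a' << 7) ^ ('b' << 0) ^ rol32(0, 14)
 *	     = 0x3080 ^ 0x62 ^ 0
 *	     = 0x30e2
 */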
1955xfs_dahash_t
1956xfs_da_hashname(const __uint8_t *name, int namelen)
1957{
1958	xfs_dahash_t hash;
1959
1960	/*
1961	 * Do four characters at a time as long as we can.
1962	 */
1963	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1964		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1965		       (name[3] << 0) ^ rol32(hash, 7 * 4);
1966
1967	/*
1968	 * Now do the rest of the characters.
1969	 */
1970	switch (namelen) {
1971	case 3:
1972		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1973		       rol32(hash, 7 * 3);
1974	case 2:
1975		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1976	case 1:
1977		return (name[0] << 0) ^ rol32(hash, 7 * 1);
1978	default: /* case 0: */
1979		return hash;
1980	}
1981}
1982
1983enum xfs_dacmp
1984xfs_da_compname(
1985	struct xfs_da_args *args,
1986	const unsigned char *name,
1987	int		len)
1988{
1989	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1990					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1991}
1992
1993static xfs_dahash_t
1994xfs_default_hashname(
1995	struct xfs_name	*name)
1996{
1997	return xfs_da_hashname(name->name, name->len);
1998}
1999
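/*
 * Default, case-sensitive name operations.  Mounts using ASCII
 * case-insensitive directories install their own hashname/compname pair
 * (see xfs_ascii_ci_nameops in xfs_dir2.c) instead.
 */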
2000const struct xfs_nameops xfs_default_nameops = {
2001	.hashname	= xfs_default_hashname,
2002	.compname	= xfs_da_compname
2003};
2004
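/*
 * Allocate "count" new da blocks at or after *bno in the given fork,
 * contiguously if possible and piecewise otherwise, returning the chosen
 * file offset through *bno.  Fails with -ENOSPC if the full range could
 * not be mapped.
 */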
2005int
2006xfs_da_grow_inode_int(
2007	struct xfs_da_args	*args,
2008	xfs_fileoff_t		*bno,
2009	int			count)
2010{
2011	struct xfs_trans	*tp = args->trans;
2012	struct xfs_inode	*dp = args->dp;
2013	int			w = args->whichfork;
2014	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
2015	struct xfs_bmbt_irec	map, *mapp;
2016	int			nmap, error, got, i, mapi;
2017
2018	/*
2019	 * Find a spot in the file space to put the new block.
2020	 */
2021	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2022	if (error)
2023		return error;
2024
2025	/*
2026	 * Try mapping it in one filesystem block.
2027	 */
2028	nmap = 1;
2029	ASSERT(args->firstblock != NULL);
2030	error = xfs_bmapi_write(tp, dp, *bno, count,
2031			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2032			args->firstblock, args->total, &map, &nmap,
2033			args->flist);
2034	if (error)
2035		return error;
2036
2037	ASSERT(nmap <= 1);
2038	if (nmap == 1) {
2039		mapp = &map;
2040		mapi = 1;
2041	} else if (nmap == 0 && count > 1) {
2042		xfs_fileoff_t		b;
2043		int			c;
2044
2045		/*
2046		 * If we didn't get it and the block might work if fragmented,
2047		 * try without the CONTIG flag.  Loop until we get it all.
2048		 */
2049		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2050		for (b = *bno, mapi = 0; b < *bno + count; ) {
2051			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2052			c = (int)(*bno + count - b);
2053			error = xfs_bmapi_write(tp, dp, b, c,
2054					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2055					args->firstblock, args->total,
2056					&mapp[mapi], &nmap, args->flist);
2057			if (error)
2058				goto out_free_map;
2059			if (nmap < 1)
2060				break;
2061			mapi += nmap;
2062			b = mapp[mapi - 1].br_startoff +
2063			    mapp[mapi - 1].br_blockcount;
2064		}
2065	} else {
2066		mapi = 0;
2067		mapp = NULL;
2068	}
2069
2070	/*
2071	 * Count the blocks we got, make sure it matches the total.
2072	 */
2073	for (i = 0, got = 0; i < mapi; i++)
2074		got += mapp[i].br_blockcount;
2075	if (got != count || mapp[0].br_startoff != *bno ||
2076	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2077	    *bno + count) {
2078		error = -ENOSPC;
2079		goto out_free_map;
2080	}
2081
2082	/* account for newly allocated blocks in reserved blocks total */
2083	args->total -= dp->i_d.di_nblocks - nblks;
2084
2085out_free_map:
2086	if (mapp != &map)
2087		kmem_free(mapp);
2088	return error;
2089}
2090
2091/*
2092 * Add a block to the btree ahead of the file.
2093 * Return the new block number to the caller.
2094 */
2095int
2096xfs_da_grow_inode(
2097	struct xfs_da_args	*args,
2098	xfs_dablk_t		*new_blkno)
2099{
2100	xfs_fileoff_t		bno;
2101	int			error;
2102
2103	trace_xfs_da_grow_inode(args);
2104
2105	bno = args->geo->leafblk;
2106	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2107	if (!error)
2108		*new_blkno = (xfs_dablk_t)bno;
2109	return error;
2110}
2111
2112/*
2113 * Ick.  We need to always be able to remove a btree block, even
2114 * if there's no space reservation because the filesystem is full.
2115 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2116 * It swaps the target block with the last block in the file.  The
 2117 * last block in the file can always be removed, since removing it
 2118 * cannot cause a bmap btree split.
2119 */
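/*
 * In outline: read the last block, copy its contents over the dead block,
 * repoint the sibling links at the dead block's address, then walk down
 * from the root to find the parent entry that references the last block
 * and repoint that too.  The old last block is handed back to the caller
 * as the block to actually free.
 */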
2120STATIC int
2121xfs_da3_swap_lastblock(
2122	struct xfs_da_args	*args,
2123	xfs_dablk_t		*dead_blknop,
2124	struct xfs_buf		**dead_bufp)
2125{
2126	struct xfs_da_blkinfo	*dead_info;
2127	struct xfs_da_blkinfo	*sib_info;
2128	struct xfs_da_intnode	*par_node;
2129	struct xfs_da_intnode	*dead_node;
2130	struct xfs_dir2_leaf	*dead_leaf2;
2131	struct xfs_da_node_entry *btree;
2132	struct xfs_da3_icnode_hdr par_hdr;
2133	struct xfs_inode	*dp;
2134	struct xfs_trans	*tp;
2135	struct xfs_mount	*mp;
2136	struct xfs_buf		*dead_buf;
2137	struct xfs_buf		*last_buf;
2138	struct xfs_buf		*sib_buf;
2139	struct xfs_buf		*par_buf;
2140	xfs_dahash_t		dead_hash;
2141	xfs_fileoff_t		lastoff;
2142	xfs_dablk_t		dead_blkno;
2143	xfs_dablk_t		last_blkno;
2144	xfs_dablk_t		sib_blkno;
2145	xfs_dablk_t		par_blkno;
2146	int			error;
2147	int			w;
2148	int			entno;
2149	int			level;
2150	int			dead_level;
2151
2152	trace_xfs_da_swap_lastblock(args);
2153
2154	dead_buf = *dead_bufp;
2155	dead_blkno = *dead_blknop;
2156	tp = args->trans;
2157	dp = args->dp;
2158	w = args->whichfork;
2159	ASSERT(w == XFS_DATA_FORK);
2160	mp = dp->i_mount;
2161	lastoff = args->geo->freeblk;
2162	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2163	if (error)
2164		return error;
2165	if (unlikely(lastoff == 0)) {
2166		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2167				 mp);
2168		return -EFSCORRUPTED;
2169	}
2170	/*
2171	 * Read the last block in the btree space.
2172	 */
2173	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2174	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2175	if (error)
2176		return error;
2177	/*
2178	 * Copy the last block into the dead buffer and log it.
2179	 */
2180	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2181	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2182	dead_info = dead_buf->b_addr;
2183	/*
2184	 * Get values from the moved block.
2185	 */
2186	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2187	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2188		struct xfs_dir3_icleaf_hdr leafhdr;
2189		struct xfs_dir2_leaf_entry *ents;
2190
2191		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2192		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2193		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2194		dead_level = 0;
2195		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2196	} else {
2197		struct xfs_da3_icnode_hdr deadhdr;
2198
2199		dead_node = (xfs_da_intnode_t *)dead_info;
2200		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2201		btree = dp->d_ops->node_tree_p(dead_node);
2202		dead_level = deadhdr.level;
2203		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2204	}
2205	sib_buf = par_buf = NULL;
2206	/*
2207	 * If the moved block has a left sibling, fix up the pointers.
2208	 */
2209	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2210		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2211		if (error)
2212			goto done;
2213		sib_info = sib_buf->b_addr;
2214		if (unlikely(
2215		    be32_to_cpu(sib_info->forw) != last_blkno ||
2216		    sib_info->magic != dead_info->magic)) {
2217			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2218					 XFS_ERRLEVEL_LOW, mp);
2219			error = -EFSCORRUPTED;
2220			goto done;
2221		}
2222		sib_info->forw = cpu_to_be32(dead_blkno);
2223		xfs_trans_log_buf(tp, sib_buf,
2224			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2225					sizeof(sib_info->forw)));
2226		sib_buf = NULL;
2227	}
2228	/*
2229	 * If the moved block has a right sibling, fix up the pointers.
2230	 */
2231	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2232		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2233		if (error)
2234			goto done;
2235		sib_info = sib_buf->b_addr;
2236		if (unlikely(
2237		       be32_to_cpu(sib_info->back) != last_blkno ||
2238		       sib_info->magic != dead_info->magic)) {
2239			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2240					 XFS_ERRLEVEL_LOW, mp);
2241			error = -EFSCORRUPTED;
2242			goto done;
2243		}
2244		sib_info->back = cpu_to_be32(dead_blkno);
2245		xfs_trans_log_buf(tp, sib_buf,
2246			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2247					sizeof(sib_info->back)));
2248		sib_buf = NULL;
2249	}
2250	par_blkno = args->geo->leafblk;
2251	level = -1;
2252	/*
2253	 * Walk down the tree looking for the parent of the moved block.
2254	 */
2255	for (;;) {
2256		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2257		if (error)
2258			goto done;
2259		par_node = par_buf->b_addr;
2260		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2261		if (level >= 0 && level != par_hdr.level + 1) {
2262			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2263					 XFS_ERRLEVEL_LOW, mp);
2264			error = -EFSCORRUPTED;
2265			goto done;
2266		}
2267		level = par_hdr.level;
2268		btree = dp->d_ops->node_tree_p(par_node);
2269		for (entno = 0;
2270		     entno < par_hdr.count &&
2271		     be32_to_cpu(btree[entno].hashval) < dead_hash;
2272		     entno++)
2273			continue;
2274		if (entno == par_hdr.count) {
2275			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2276					 XFS_ERRLEVEL_LOW, mp);
2277			error = -EFSCORRUPTED;
2278			goto done;
2279		}
2280		par_blkno = be32_to_cpu(btree[entno].before);
2281		if (level == dead_level + 1)
2282			break;
2283		xfs_trans_brelse(tp, par_buf);
2284		par_buf = NULL;
2285	}
2286	/*
2287	 * We're in the right parent block.
2288	 * Look for the right entry.
2289	 */
2290	for (;;) {
2291		for (;
2292		     entno < par_hdr.count &&
2293		     be32_to_cpu(btree[entno].before) != last_blkno;
2294		     entno++)
2295			continue;
2296		if (entno < par_hdr.count)
2297			break;
2298		par_blkno = par_hdr.forw;
2299		xfs_trans_brelse(tp, par_buf);
2300		par_buf = NULL;
2301		if (unlikely(par_blkno == 0)) {
2302			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2303					 XFS_ERRLEVEL_LOW, mp);
2304			error = -EFSCORRUPTED;
2305			goto done;
2306		}
2307		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2308		if (error)
2309			goto done;
2310		par_node = par_buf->b_addr;
2311		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2312		if (par_hdr.level != level) {
2313			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2314					 XFS_ERRLEVEL_LOW, mp);
2315			error = -EFSCORRUPTED;
2316			goto done;
2317		}
2318		btree = dp->d_ops->node_tree_p(par_node);
2319		entno = 0;
2320	}
2321	/*
2322	 * Update the parent entry pointing to the moved block.
2323	 */
2324	btree[entno].before = cpu_to_be32(dead_blkno);
2325	xfs_trans_log_buf(tp, par_buf,
2326		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2327				sizeof(btree[entno].before)));
2328	*dead_blknop = last_blkno;
2329	*dead_bufp = last_buf;
2330	return 0;
2331done:
2332	if (par_buf)
2333		xfs_trans_brelse(tp, par_buf);
2334	if (sib_buf)
2335		xfs_trans_brelse(tp, sib_buf);
2336	xfs_trans_brelse(tp, last_buf);
2337	return error;
2338}
2339
2340/*
2341 * Remove a btree block from a directory or attribute.
2342 */
2343int
2344xfs_da_shrink_inode(
2345	xfs_da_args_t	*args,
2346	xfs_dablk_t	dead_blkno,
2347	struct xfs_buf	*dead_buf)
2348{
2349	xfs_inode_t *dp;
2350	int done, error, w, count;
2351	xfs_trans_t *tp;
2352
2353	trace_xfs_da_shrink_inode(args);
2354
2355	dp = args->dp;
2356	w = args->whichfork;
2357	tp = args->trans;
2358	count = args->geo->fsbcount;
2359	for (;;) {
2360		/*
2361		 * Remove extents.  If we get ENOSPC for a dir we have to move
2362		 * the last block to the place we want to kill.
2363		 */
2364		error = xfs_bunmapi(tp, dp, dead_blkno, count,
2365				    xfs_bmapi_aflag(w), 0, args->firstblock,
2366				    args->flist, &done);
2367		if (error == -ENOSPC) {
2368			if (w != XFS_DATA_FORK)
2369				break;
2370			error = xfs_da3_swap_lastblock(args, &dead_blkno,
2371						      &dead_buf);
2372			if (error)
2373				break;
2374		} else {
2375			break;
2376		}
2377	}
2378	xfs_trans_binval(tp, dead_buf);
2379	return error;
2380}
2381
2382/*
2383 * See if the mapping(s) for this btree block are valid, i.e.
2384 * don't contain holes, are logically contiguous, and cover the whole range.
2385 */
2386STATIC int
2387xfs_da_map_covers_blocks(
2388	int		nmap,
2389	xfs_bmbt_irec_t	*mapp,
2390	xfs_dablk_t	bno,
2391	int		count)
2392{
2393	int		i;
2394	xfs_fileoff_t	off;
2395
2396	for (i = 0, off = bno; i < nmap; i++) {
2397		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2398		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
2399			return 0;
2400		}
2401		if (off != mapp[i].br_startoff) {
2402			return 0;
2403		}
2404		off += mapp[i].br_blockcount;
2405	}
2406	return off == bno + count;
2407}
2408
2409/*
2410 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2411 *
2412 * For the single map case, it is assumed that the caller has provided a pointer
2413 * to a valid xfs_buf_map.  For the multiple map case, this function will
2414 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2415 * map pointer with the allocated map.
2416 */
2417static int
2418xfs_buf_map_from_irec(
2419	struct xfs_mount	*mp,
2420	struct xfs_buf_map	**mapp,
2421	int			*nmaps,
2422	struct xfs_bmbt_irec	*irecs,
2423	int			nirecs)
2424{
2425	struct xfs_buf_map	*map;
2426	int			i;
2427
2428	ASSERT(*nmaps == 1);
2429	ASSERT(nirecs >= 1);
2430
2431	if (nirecs > 1) {
2432		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2433				  KM_SLEEP | KM_NOFS);
2434		if (!map)
2435			return -ENOMEM;
2436		*mapp = map;
2437	}
2438
2439	*nmaps = nirecs;
2440	map = *mapp;
2441	for (i = 0; i < *nmaps; i++) {
2442		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2443		       irecs[i].br_startblock != HOLESTARTBLOCK);
2444		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2445		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2446	}
2447	return 0;
2448}
2449
2450/*
2451 * Map the block we are given ready for reading. There are three possible return
2452 * values:
2453 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
2454 *	     caller knows not to execute a subsequent read.
2455 *	 0 - if we mapped the block successfully
 2456 *	<0 - negative errno if there was an error.
2457 */
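/*
 * A mappedbno of -1 or -2 means the caller has no disk mapping and one
 * must be looked up with xfs_bmapi_read(); -2 additionally means landing
 * in a hole is acceptable.  Any other value is taken as the already-known
 * disk address of a single contiguous mapping.
 */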
2458static int
2459xfs_dabuf_map(
2460	struct xfs_inode	*dp,
2461	xfs_dablk_t		bno,
2462	xfs_daddr_t		mappedbno,
2463	int			whichfork,
2464	struct xfs_buf_map	**map,
2465	int			*nmaps)
2466{
2467	struct xfs_mount	*mp = dp->i_mount;
2468	int			nfsb;
2469	int			error = 0;
2470	struct xfs_bmbt_irec	irec;
2471	struct xfs_bmbt_irec	*irecs = &irec;
2472	int			nirecs;
2473
2474	ASSERT(map && *map);
2475	ASSERT(*nmaps == 1);
2476
2477	if (whichfork == XFS_DATA_FORK)
2478		nfsb = mp->m_dir_geo->fsbcount;
2479	else
2480		nfsb = mp->m_attr_geo->fsbcount;
2481
2482	/*
2483	 * Caller doesn't have a mapping.  -2 means don't complain
2484	 * if we land in a hole.
2485	 */
2486	if (mappedbno == -1 || mappedbno == -2) {
2487		/*
2488		 * Optimize the one-block case.
2489		 */
2490		if (nfsb != 1)
2491			irecs = kmem_zalloc(sizeof(irec) * nfsb,
2492					    KM_SLEEP | KM_NOFS);
2493
2494		nirecs = nfsb;
2495		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2496				       &nirecs, xfs_bmapi_aflag(whichfork));
2497		if (error)
2498			goto out;
2499	} else {
2500		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2501		irecs->br_startoff = (xfs_fileoff_t)bno;
2502		irecs->br_blockcount = nfsb;
2503		irecs->br_state = 0;
2504		nirecs = 1;
2505	}
2506
2507	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2508		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
2509		if (unlikely(error == -EFSCORRUPTED)) {
2510			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2511				int i;
2512				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2513					__func__, (long long)bno,
2514					(long long)dp->i_ino);
2515				for (i = 0; i < *nmaps; i++) {
2516					xfs_alert(mp,
2517"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2518						i,
2519						(long long)irecs[i].br_startoff,
2520						(long long)irecs[i].br_startblock,
2521						(long long)irecs[i].br_blockcount,
2522						irecs[i].br_state);
2523				}
2524			}
2525			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2526					 XFS_ERRLEVEL_LOW, mp);
2527		}
2528		goto out;
2529	}
2530	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2531out:
2532	if (irecs != &irec)
2533		kmem_free(irecs);
2534	return error;
2535}
2536
2537/*
2538 * Get a buffer for the dir/attr block.
2539 */
2540int
2541xfs_da_get_buf(
2542	struct xfs_trans	*trans,
2543	struct xfs_inode	*dp,
2544	xfs_dablk_t		bno,
2545	xfs_daddr_t		mappedbno,
2546	struct xfs_buf		**bpp,
2547	int			whichfork)
2548{
2549	struct xfs_buf		*bp;
2550	struct xfs_buf_map	map;
2551	struct xfs_buf_map	*mapp;
2552	int			nmap;
2553	int			error;
2554
2555	*bpp = NULL;
2556	mapp = &map;
2557	nmap = 1;
2558	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2559				&mapp, &nmap);
2560	if (error) {
2561		/* mapping a hole is not an error, but we don't continue */
2562		if (error == -1)
2563			error = 0;
2564		goto out_free;
2565	}
2566
2567	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2568				    mapp, nmap, 0);
2569	error = bp ? bp->b_error : -EIO;
2570	if (error) {
2571		if (bp)
2572			xfs_trans_brelse(trans, bp);
2573		goto out_free;
2574	}
2575
2576	*bpp = bp;
2577
2578out_free:
2579	if (mapp != &map)
2580		kmem_free(mapp);
2581
2582	return error;
2583}
2584
2585/*
2586 * Get a buffer for the dir/attr block, fill in the contents.
2587 */
2588int
2589xfs_da_read_buf(
2590	struct xfs_trans	*trans,
2591	struct xfs_inode	*dp,
2592	xfs_dablk_t		bno,
2593	xfs_daddr_t		mappedbno,
2594	struct xfs_buf		**bpp,
2595	int			whichfork,
2596	const struct xfs_buf_ops *ops)
2597{
2598	struct xfs_buf		*bp;
2599	struct xfs_buf_map	map;
2600	struct xfs_buf_map	*mapp;
2601	int			nmap;
2602	int			error;
2603
2604	*bpp = NULL;
2605	mapp = &map;
2606	nmap = 1;
2607	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2608				&mapp, &nmap);
2609	if (error) {
2610		/* mapping a hole is not an error, but we don't continue */
2611		if (error == -1)
2612			error = 0;
2613		goto out_free;
2614	}
2615
2616	error = xfs_trans_read_buf_map(dp->i_mount, trans,
2617					dp->i_mount->m_ddev_targp,
2618					mapp, nmap, 0, &bp, ops);
2619	if (error)
2620		goto out_free;
2621
2622	if (whichfork == XFS_ATTR_FORK)
2623		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2624	else
2625		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2626	*bpp = bp;
2627out_free:
2628	if (mapp != &map)
2629		kmem_free(mapp);
2630
2631	return error;
2632}
2633
2634/*
2635 * Readahead the dir/attr block.
2636 */
2637xfs_daddr_t
2638xfs_da_reada_buf(
2639	struct xfs_inode	*dp,
2640	xfs_dablk_t		bno,
2641	xfs_daddr_t		mappedbno,
2642	int			whichfork,
2643	const struct xfs_buf_ops *ops)
2644{
2645	struct xfs_buf_map	map;
2646	struct xfs_buf_map	*mapp;
2647	int			nmap;
2648	int			error;
2649
2650	mapp = &map;
2651	nmap = 1;
2652	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2653				&mapp, &nmap);
2654	if (error) {
2655		/* mapping a hole is not an error, but we don't continue */
2656		if (error == -1)
2657			error = 0;
2658		goto out_free;
2659	}
2660
2661	mappedbno = mapp[0].bm_bn;
2662	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2663
2664out_free:
2665	if (mapp != &map)
2666		kmem_free(mapp);
2667
2668	if (error)
2669		return -1;
2670	return mappedbno;
2671}