   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_mount.h"
  27#include "xfs_da_btree.h"
  28#include "xfs_bmap_btree.h"
  29#include "xfs_dir2.h"
  30#include "xfs_dir2_format.h"
  31#include "xfs_dir2_priv.h"
  32#include "xfs_dinode.h"
  33#include "xfs_inode.h"
  34#include "xfs_inode_item.h"
  35#include "xfs_alloc.h"
  36#include "xfs_bmap.h"
  37#include "xfs_attr.h"
  38#include "xfs_attr_leaf.h"
  39#include "xfs_error.h"
  40#include "xfs_trace.h"
  41
  42/*
  43 * xfs_da_btree.c
  44 *
  45 * Routines to implement directories as Btrees of hashed names.
  46 */
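/*
 * Structure overview: the "dabtree" is a variable-depth B+tree keyed on
 * the name hash.  Level 0 blocks are leaves (attr leaf or dir2 leafn
 * format); higher levels are xfs_da_intnode blocks whose entries pair a
 * hashval with the block number ("before") of the child that covers
 * hashes up to that value.  Blocks at each level are chained through
 * their forw/back pointers, and the same code serves both the directory
 * (data fork) and the extended attribute fork.
 */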
  47
  48/*========================================================================
  49 * Function prototypes for the kernel.
  50 *========================================================================*/
  51
  52/*
  53 * Routines used for growing the Btree.
  54 */
  55STATIC int xfs_da_root_split(xfs_da_state_t *state,
  56					    xfs_da_state_blk_t *existing_root,
  57					    xfs_da_state_blk_t *new_child);
  58STATIC int xfs_da_node_split(xfs_da_state_t *state,
  59					    xfs_da_state_blk_t *existing_blk,
  60					    xfs_da_state_blk_t *split_blk,
  61					    xfs_da_state_blk_t *blk_to_add,
  62					    int treelevel,
  63					    int *result);
  64STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
  65					 xfs_da_state_blk_t *node_blk_1,
  66					 xfs_da_state_blk_t *node_blk_2);
  67STATIC void xfs_da_node_add(xfs_da_state_t *state,
  68				   xfs_da_state_blk_t *old_node_blk,
  69				   xfs_da_state_blk_t *new_node_blk);
  70
  71/*
  72 * Routines used for shrinking the Btree.
  73 */
  74STATIC int xfs_da_root_join(xfs_da_state_t *state,
  75					   xfs_da_state_blk_t *root_blk);
  76STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
  77STATIC void xfs_da_node_remove(xfs_da_state_t *state,
  78					      xfs_da_state_blk_t *drop_blk);
  79STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
  80					 xfs_da_state_blk_t *src_node_blk,
  81					 xfs_da_state_blk_t *dst_node_blk);
  82
  83/*
  84 * Utility routines.
  85 */
  86STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
  87STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
  88STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
  89STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
  90				  xfs_da_state_blk_t *drop_blk,
  91				  xfs_da_state_blk_t *save_blk);
  92STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
  93
  94/*========================================================================
  95 * Routines used for growing the Btree.
  96 *========================================================================*/
  97
  98/*
  99 * Create the initial contents of an intermediate node.
 100 */
 101int
 102xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
 103				 xfs_dabuf_t **bpp, int whichfork)
 104{
 105	xfs_da_intnode_t *node;
 106	xfs_dabuf_t *bp;
 107	int error;
 108	xfs_trans_t *tp;
 109
 110	trace_xfs_da_node_create(args);
 111
 112	tp = args->trans;
 113	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
 114	if (error)
 115		return(error);
 116	ASSERT(bp != NULL);
 117	node = bp->data;
 118	node->hdr.info.forw = 0;
 119	node->hdr.info.back = 0;
 120	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
 121	node->hdr.info.pad = 0;
 122	node->hdr.count = 0;
 123	node->hdr.level = cpu_to_be16(level);
 124
 125	xfs_da_log_buf(tp, bp,
 126		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 127
 128	*bpp = bp;
 129	return(0);
 130}
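/*
 * Note that xfs_da_node_create() logs only the node header; callers such
 * as xfs_da_root_split() and xfs_da_node_split() fill in the btree[]
 * entries afterwards and log those ranges themselves.
 */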
 131
 132/*
 133 * Split a leaf node, rebalance, then possibly split
 134 * intermediate nodes, rebalance, etc.
 135 */
 136int							/* error */
 137xfs_da_split(xfs_da_state_t *state)
 138{
 139	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
 140	xfs_da_intnode_t *node;
 141	xfs_dabuf_t *bp;
 142	int max, action, error, i;
 143
 144	trace_xfs_da_split(state->args);
 145
 146	/*
 147	 * Walk back up the tree splitting/inserting/adjusting as necessary.
 148	 * If we need to insert and there isn't room, split the node, then
 149	 * decide which fragment to insert the new block from below into.
 150	 * Note that we may split the root this way, but we need more fixup.
 151	 */
 152	max = state->path.active - 1;
 153	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
 154	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
 155	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
 156
 157	addblk = &state->path.blk[max];		/* initial dummy value */
 158	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
 159		oldblk = &state->path.blk[i];
 160		newblk = &state->altpath.blk[i];
 161
 162		/*
 163		 * If a leaf node then
 164		 *     Allocate a new leaf node, then rebalance across them.
 165		 * else if an intermediate node then
 166		 *     We split on the last layer, must we split the node?
 167		 */
 168		switch (oldblk->magic) {
 169		case XFS_ATTR_LEAF_MAGIC:
 170			error = xfs_attr_leaf_split(state, oldblk, newblk);
 171			if ((error != 0) && (error != ENOSPC)) {
 172				return(error);	/* GROT: attr is inconsistent */
 173			}
 174			if (!error) {
 175				addblk = newblk;
 176				break;
 177			}
 178			/*
 179			 * Entry wouldn't fit, split the leaf again.
 180			 */
 181			state->extravalid = 1;
 182			if (state->inleaf) {
 183				state->extraafter = 0;	/* before newblk */
 184				trace_xfs_attr_leaf_split_before(state->args);
 185				error = xfs_attr_leaf_split(state, oldblk,
 186							    &state->extrablk);
 187			} else {
 188				state->extraafter = 1;	/* after newblk */
 189				trace_xfs_attr_leaf_split_after(state->args);
 190				error = xfs_attr_leaf_split(state, newblk,
 191							    &state->extrablk);
 192			}
 193			if (error)
 194				return(error);	/* GROT: attr inconsistent */
 195			addblk = newblk;
 196			break;
 197		case XFS_DIR2_LEAFN_MAGIC:
 198			error = xfs_dir2_leafn_split(state, oldblk, newblk);
 199			if (error)
 200				return error;
 201			addblk = newblk;
 202			break;
 203		case XFS_DA_NODE_MAGIC:
 204			error = xfs_da_node_split(state, oldblk, newblk, addblk,
 205							 max - i, &action);
 206			xfs_da_buf_done(addblk->bp);
 207			addblk->bp = NULL;
 208			if (error)
 209				return(error);	/* GROT: dir is inconsistent */
 210			/*
 211			 * Record the newly split block for the next time thru?
 212			 */
 213			if (action)
 214				addblk = newblk;
 215			else
 216				addblk = NULL;
 217			break;
 218		}
 219
 220		/*
 221		 * Update the btree to show the new hashval for this child.
 222		 */
 223		xfs_da_fixhashpath(state, &state->path);
 224		/*
 225		 * If we won't need this block again, it's getting dropped
 226		 * from the active path by the loop control, so we need
 227		 * to mark it done now.
 228		 */
 229		if (i > 0 || !addblk)
 230			xfs_da_buf_done(oldblk->bp);
 231	}
 232	if (!addblk)
 233		return(0);
 234
 235	/*
 236	 * Split the root node.
 237	 */
 238	ASSERT(state->path.active == 0);
 239	oldblk = &state->path.blk[0];
 240	error = xfs_da_root_split(state, oldblk, addblk);
 241	if (error) {
 242		xfs_da_buf_done(oldblk->bp);
 243		xfs_da_buf_done(addblk->bp);
 244		addblk->bp = NULL;
 245		return(error);	/* GROT: dir is inconsistent */
 246	}
 247
 248	/*
 249	 * Update pointers to the node which used to be block 0 and
 250	 * just got bumped because of the addition of a new root node.
 251	 * There might be three blocks involved if a double split occurred,
 252	 * and the original block 0 could be at any position in the list.
 253	 */
 254
 255	node = oldblk->bp->data;
 256	if (node->hdr.info.forw) {
 257		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
 258			bp = addblk->bp;
 259		} else {
 260			ASSERT(state->extravalid);
 261			bp = state->extrablk.bp;
 262		}
 263		node = bp->data;
 264		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 265		xfs_da_log_buf(state->args->trans, bp,
 266		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 267		    sizeof(node->hdr.info)));
 268	}
 269	node = oldblk->bp->data;
 270	if (node->hdr.info.back) {
 271		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
 272			bp = addblk->bp;
 273		} else {
 274			ASSERT(state->extravalid);
 275			bp = state->extrablk.bp;
 276		}
 277		node = bp->data;
 278		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 279		xfs_da_log_buf(state->args->trans, bp,
 280		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 281		    sizeof(node->hdr.info)));
 282	}
 283	xfs_da_buf_done(oldblk->bp);
 284	xfs_da_buf_done(addblk->bp);
 285	addblk->bp = NULL;
 286	return(0);
 287}
 288
 289/*
 290 * Split the root.  We have to create a new root and point to the two
 291 * parts (the split old root) that we just created.  Copy block zero to
 292 * the EOF, extending the inode in process.
 293 */
 294STATIC int						/* error */
 295xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 296				 xfs_da_state_blk_t *blk2)
 297{
 298	xfs_da_intnode_t *node, *oldroot;
 299	xfs_da_args_t *args;
 300	xfs_dablk_t blkno;
 301	xfs_dabuf_t *bp;
 302	int error, size;
 303	xfs_inode_t *dp;
 304	xfs_trans_t *tp;
 305	xfs_mount_t *mp;
 306	xfs_dir2_leaf_t *leaf;
 307
 308	trace_xfs_da_root_split(state->args);
 309
 310	/*
 311	 * Copy the existing (incorrect) block from the root node position
 312	 * to a free space somewhere.
 313	 */
 314	args = state->args;
 315	ASSERT(args != NULL);
 316	error = xfs_da_grow_inode(args, &blkno);
 317	if (error)
 318		return(error);
 319	dp = args->dp;
 320	tp = args->trans;
 321	mp = state->mp;
 322	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
 323	if (error)
 324		return(error);
 325	ASSERT(bp != NULL);
 326	node = bp->data;
 327	oldroot = blk1->bp->data;
 328	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
 329		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
 330			     (char *)oldroot);
 331	} else {
 332		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
 333		leaf = (xfs_dir2_leaf_t *)oldroot;
 334		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
 335			     (char *)leaf);
 336	}
 337	memcpy(node, oldroot, size);
 338	xfs_da_log_buf(tp, bp, 0, size - 1);
 339	xfs_da_buf_done(blk1->bp);
 340	blk1->bp = bp;
 341	blk1->blkno = blkno;
 342
 343	/*
 344	 * Set up the new root node.
 345	 */
 346	error = xfs_da_node_create(args,
 347		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
 348		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
 349	if (error)
 350		return(error);
 351	node = bp->data;
 352	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
 353	node->btree[0].before = cpu_to_be32(blk1->blkno);
 354	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
 355	node->btree[1].before = cpu_to_be32(blk2->blkno);
 356	node->hdr.count = cpu_to_be16(2);
 357
 358#ifdef DEBUG
 359	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
 360		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
 361		       blk1->blkno < mp->m_dirfreeblk);
 362		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
 363		       blk2->blkno < mp->m_dirfreeblk);
 364	}
 365#endif
 366
 367	/* Header is already logged by xfs_da_node_create */
 368	xfs_da_log_buf(tp, bp,
 369		XFS_DA_LOGRANGE(node, node->btree,
 370			sizeof(xfs_da_node_entry_t) * 2));
 371	xfs_da_buf_done(bp);
 372
 373	return(0);
 374}
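/*
 * The root must stay at a fixed block number (0 for the attribute fork,
 * m_dirleafblk for directories), which is why xfs_da_root_split() copies
 * the old root's contents out to a newly allocated block and rebuilds the
 * fixed block as a two-entry intermediate node pointing at both halves.
 */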
 375
 376/*
 377 * Split the node, rebalance, then add the new entry.
 378 */
 379STATIC int						/* error */
 380xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 381				 xfs_da_state_blk_t *newblk,
 382				 xfs_da_state_blk_t *addblk,
 383				 int treelevel, int *result)
 384{
 385	xfs_da_intnode_t *node;
 386	xfs_dablk_t blkno;
 387	int newcount, error;
 388	int useextra;
 389
 390	trace_xfs_da_node_split(state->args);
 391
 392	node = oldblk->bp->data;
 393	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 394
 395	/*
 396	 * With V2 dirs the extra block is data or freespace.
 397	 */
 398	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
 399	newcount = 1 + useextra;
 400	/*
 401	 * Do we have to split the node?
 402	 */
 403	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
 404		/*
 405		 * Allocate a new node, add to the doubly linked chain of
 406		 * nodes, then move some of our excess entries into it.
 407		 */
 408		error = xfs_da_grow_inode(state->args, &blkno);
 409		if (error)
 410			return(error);	/* GROT: dir is inconsistent */
 411
 412		error = xfs_da_node_create(state->args, blkno, treelevel,
 413					   &newblk->bp, state->args->whichfork);
 414		if (error)
 415			return(error);	/* GROT: dir is inconsistent */
 416		newblk->blkno = blkno;
 417		newblk->magic = XFS_DA_NODE_MAGIC;
 418		xfs_da_node_rebalance(state, oldblk, newblk);
 419		error = xfs_da_blk_link(state, oldblk, newblk);
 420		if (error)
 421			return(error);
 422		*result = 1;
 423	} else {
 424		*result = 0;
 425	}
 426
 427	/*
 428	 * Insert the new entry(s) into the correct block
 429	 * (updating last hashval in the process).
 430	 *
 431	 * xfs_da_node_add() inserts BEFORE the given index,
 432	 * and as a result of using node_lookup_int() we always
 433	 * point to a valid entry (not after one), but a split
 434	 * operation always results in a new block whose hashvals
 435	 * FOLLOW the current block.
 436	 *
 437	 * If we had double-split op below us, then add the extra block too.
 438	 */
 439	node = oldblk->bp->data;
 440	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
 441		oldblk->index++;
 442		xfs_da_node_add(state, oldblk, addblk);
 443		if (useextra) {
 444			if (state->extraafter)
 445				oldblk->index++;
 446			xfs_da_node_add(state, oldblk, &state->extrablk);
 447			state->extravalid = 0;
 448		}
 449	} else {
 450		newblk->index++;
 451		xfs_da_node_add(state, newblk, addblk);
 452		if (useextra) {
 453			if (state->extraafter)
 454				newblk->index++;
 455			xfs_da_node_add(state, newblk, &state->extrablk);
 456			state->extravalid = 0;
 457		}
 458	}
 459
 460	return(0);
 461}
 462
 463/*
 464 * Balance the btree elements between two intermediate nodes,
 465 * usually one full and one empty.
 466 *
 467 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 468 */
 469STATIC void
 470xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 471				     xfs_da_state_blk_t *blk2)
 472{
 473	xfs_da_intnode_t *node1, *node2, *tmpnode;
 474	xfs_da_node_entry_t *btree_s, *btree_d;
 475	int count, tmp;
 476	xfs_trans_t *tp;
 477
 478	trace_xfs_da_node_rebalance(state->args);
 479
 480	node1 = blk1->bp->data;
 481	node2 = blk2->bp->data;
 482	/*
 483	 * Figure out how many entries need to move, and in which direction.
 484	 * Swap the nodes around if that makes it simpler.
 485	 */
 486	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
 487	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
 488	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
 489	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
 490		tmpnode = node1;
 491		node1 = node2;
 492		node2 = tmpnode;
 493	}
 494	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 495	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 496	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
 497	if (count == 0)
 498		return;
 499	tp = state->args->trans;
 500	/*
 501	 * Two cases: high-to-low and low-to-high.
 502	 */
 503	if (count > 0) {
 504		/*
 505		 * Move elements in node2 up to make a hole.
 506		 */
 507		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
 508			tmp *= (uint)sizeof(xfs_da_node_entry_t);
 509			btree_s = &node2->btree[0];
 510			btree_d = &node2->btree[count];
 511			memmove(btree_d, btree_s, tmp);
 512		}
 513
 514		/*
 515		 * Move the req'd B-tree elements from high in node1 to
 516		 * low in node2.
 517		 */
 518		be16_add_cpu(&node2->hdr.count, count);
 519		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 520		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
 521		btree_d = &node2->btree[0];
 522		memcpy(btree_d, btree_s, tmp);
 523		be16_add_cpu(&node1->hdr.count, -count);
 524	} else {
 525		/*
 526		 * Move the req'd B-tree elements from low in node2 to
 527		 * high in node1.
 528		 */
 529		count = -count;
 530		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 531		btree_s = &node2->btree[0];
 532		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
 533		memcpy(btree_d, btree_s, tmp);
 534		be16_add_cpu(&node1->hdr.count, count);
 535		xfs_da_log_buf(tp, blk1->bp,
 536			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 537
 538		/*
 539		 * Move elements in node2 down to fill the hole.
 540		 */
 541		tmp  = be16_to_cpu(node2->hdr.count) - count;
 542		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 543		btree_s = &node2->btree[count];
 544		btree_d = &node2->btree[0];
 545		memmove(btree_d, btree_s, tmp);
 546		be16_add_cpu(&node2->hdr.count, -count);
 547	}
 548
 549	/*
 550	 * Log header of node 1 and all current bits of node 2.
 551	 */
 552	xfs_da_log_buf(tp, blk1->bp,
 553		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
 554	xfs_da_log_buf(tp, blk2->bp,
 555		XFS_DA_LOGRANGE(node2, &node2->hdr,
 556			sizeof(node2->hdr) +
 557			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
 558
 559	/*
 560	 * Record the last hashval from each block for upward propagation.
 561	 * (note: don't use the swapped node pointers)
 562	 */
 563	node1 = blk1->bp->data;
 564	node2 = blk2->bp->data;
 565	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
 566	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
 567
 568	/*
 569	 * Adjust the expected index for insertion.
 570	 */
 571	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
 572		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
 573		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
 574	}
 575}
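/*
 * The index fixup at the end of xfs_da_node_rebalance() deliberately
 * pushes blk1->index past the entry count when the insertion point moved
 * into blk2: xfs_da_node_split() tests "index <= count" on the old block
 * to decide whether the new entry (and any extra block) is added to the
 * old node or to the new one.
 */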
 576
 577/*
 578 * Add a new entry to an intermediate node.
 579 */
 580STATIC void
 581xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 582			       xfs_da_state_blk_t *newblk)
 583{
 584	xfs_da_intnode_t *node;
 585	xfs_da_node_entry_t *btree;
 586	int tmp;
 587
 588	trace_xfs_da_node_add(state->args);
 589
 590	node = oldblk->bp->data;
 591	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 592	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
 593	ASSERT(newblk->blkno != 0);
 594	if (state->args->whichfork == XFS_DATA_FORK)
 595		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
 596		       newblk->blkno < state->mp->m_dirfreeblk);
 597
 598	/*
 599	 * We may need to make some room before we insert the new node.
 600	 */
 601	tmp = 0;
 602	btree = &node->btree[ oldblk->index ];
 603	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
 604		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
 605		memmove(btree + 1, btree, tmp);
 606	}
 607	btree->hashval = cpu_to_be32(newblk->hashval);
 608	btree->before = cpu_to_be32(newblk->blkno);
 609	xfs_da_log_buf(state->args->trans, oldblk->bp,
 610		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
 611	be16_add_cpu(&node->hdr.count, 1);
 612	xfs_da_log_buf(state->args->trans, oldblk->bp,
 613		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 614
 615	/*
 616	 * Copy the last hash value from the oldblk to propagate upwards.
 617	 */
 618	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
 619}
 620
 621/*========================================================================
 622 * Routines used for shrinking the Btree.
 623 *========================================================================*/
 624
 625/*
 626 * Deallocate an empty leaf node, remove it from its parent,
 627 * possibly deallocating that block, etc...
 628 */
 629int
 630xfs_da_join(xfs_da_state_t *state)
 631{
 632	xfs_da_state_blk_t *drop_blk, *save_blk;
 633	int action, error;
 634
 635	trace_xfs_da_join(state->args);
 636
 637	action = 0;
 638	drop_blk = &state->path.blk[ state->path.active-1 ];
 639	save_blk = &state->altpath.blk[ state->path.active-1 ];
 640	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
 641	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
 642	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
 643
 644	/*
 645	 * Walk back up the tree joining/deallocating as necessary.
 646	 * When we stop dropping blocks, break out.
 647	 */
 648	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
 649		 state->path.active--) {
 650		/*
 651		 * See if we can combine the block with a neighbor.
 652		 *   (action == 0) => no options, just leave
 653		 *   (action == 1) => coalesce, then unlink
 654		 *   (action == 2) => block empty, unlink it
 655		 */
 656		switch (drop_blk->magic) {
 657		case XFS_ATTR_LEAF_MAGIC:
 658			error = xfs_attr_leaf_toosmall(state, &action);
 659			if (error)
 660				return(error);
 661			if (action == 0)
 662				return(0);
 663			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
 664			break;
 665		case XFS_DIR2_LEAFN_MAGIC:
 666			error = xfs_dir2_leafn_toosmall(state, &action);
 667			if (error)
 668				return error;
 669			if (action == 0)
 670				return 0;
 671			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
 672			break;
 673		case XFS_DA_NODE_MAGIC:
 674			/*
 675			 * Remove the offending node, fixup hashvals,
 676			 * check for a toosmall neighbor.
 677			 */
 678			xfs_da_node_remove(state, drop_blk);
 679			xfs_da_fixhashpath(state, &state->path);
 680			error = xfs_da_node_toosmall(state, &action);
 681			if (error)
 682				return(error);
 683			if (action == 0)
 684				return 0;
 685			xfs_da_node_unbalance(state, drop_blk, save_blk);
 686			break;
 687		}
 688		xfs_da_fixhashpath(state, &state->altpath);
 689		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
 690		xfs_da_state_kill_altpath(state);
 691		if (error)
 692			return(error);
 693		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
 694							 drop_blk->bp);
 695		drop_blk->bp = NULL;
 696		if (error)
 697			return(error);
 698	}
 699	/*
 700	 * We joined all the way to the top.  If it turns out that
 701	 * we only have one entry in the root, make the child block
 702	 * the new root.
 703	 */
 704	xfs_da_node_remove(state, drop_blk);
 705	xfs_da_fixhashpath(state, &state->path);
 706	error = xfs_da_root_join(state, &state->path.blk[0]);
 707	return(error);
 708}
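/*
 * xfs_da_join() is the mirror image of xfs_da_split(): it walks from the
 * leaf back toward the root, merging or deleting blocks that have become
 * too small, and finally lets xfs_da_root_join() collapse a root that is
 * left with only one child.
 */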
 709
 710#ifdef	DEBUG
 711static void
 712xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
 713{
 714	__be16	magic = blkinfo->magic;
 715
 716	if (level == 1) {
 717		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 718		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 719	} else
 720		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 721	ASSERT(!blkinfo->forw);
 722	ASSERT(!blkinfo->back);
 723}
 724#else	/* !DEBUG */
 725#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
 726#endif	/* !DEBUG */
 727
 728/*
 729 * We have only one entry in the root.  Copy the only remaining child of
 730 * the old root to block 0 as the new root node.
 731 */
 732STATIC int
 733xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
 734{
 735	xfs_da_intnode_t *oldroot;
 736	xfs_da_args_t *args;
 737	xfs_dablk_t child;
 738	xfs_dabuf_t *bp;
 739	int error;
 740
 741	trace_xfs_da_root_join(state->args);
 742
 743	args = state->args;
 744	ASSERT(args != NULL);
 745	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
 746	oldroot = root_blk->bp->data;
 747	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 748	ASSERT(!oldroot->hdr.info.forw);
 749	ASSERT(!oldroot->hdr.info.back);
 750
 751	/*
 752	 * If the root has more than one child, then don't do anything.
 753	 */
 754	if (be16_to_cpu(oldroot->hdr.count) > 1)
 755		return(0);
 756
 757	/*
 758	 * Read in the (only) child block, then copy those bytes into
 759	 * the root block's buffer and free the original child block.
 760	 */
 761	child = be32_to_cpu(oldroot->btree[0].before);
 762	ASSERT(child != 0);
 763	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
 764					     args->whichfork);
 765	if (error)
 766		return(error);
 767	ASSERT(bp != NULL);
 768	xfs_da_blkinfo_onlychild_validate(bp->data,
 769					be16_to_cpu(oldroot->hdr.level));
 770
 771	memcpy(root_blk->bp->data, bp->data, state->blocksize);
 772	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
 773	error = xfs_da_shrink_inode(args, child, bp);
 774	return(error);
 775}
 776
 777/*
 778 * Check a node block and its neighbors to see if the block should be
 779 * collapsed into one or the other neighbor.  Always keep the block
 780 * with the smaller block number.
 781 * If the current block is over 50% full, don't try to join it, return 0.
 782 * If the block is empty, fill in the state structure and return 2.
 783 * If it can be collapsed, fill in the state structure and return 1.
 784 * If nothing can be done, return 0.
 785 */
 786STATIC int
 787xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
 788{
 789	xfs_da_intnode_t *node;
 790	xfs_da_state_blk_t *blk;
 791	xfs_da_blkinfo_t *info;
 792	int count, forward, error, retval, i;
 793	xfs_dablk_t blkno;
 794	xfs_dabuf_t *bp;
 795
 796	/*
 797	 * Check for the degenerate case of the block being over 50% full.
 798	 * If so, it's not worth even looking to see if we might be able
 799	 * to coalesce with a sibling.
 800	 */
 801	blk = &state->path.blk[ state->path.active-1 ];
 802	info = blk->bp->data;
 803	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 804	node = (xfs_da_intnode_t *)info;
 805	count = be16_to_cpu(node->hdr.count);
 806	if (count > (state->node_ents >> 1)) {
 807		*action = 0;	/* blk over 50%, don't try to join */
 808		return(0);	/* blk over 50%, don't try to join */
 809	}
 810
 811	/*
 812	 * Check for the degenerate case of the block being empty.
 813	 * If the block is empty, we'll simply delete it, no need to
 814	 * coalesce it with a sibling block.  We choose (arbitrarily)
 815	 * to merge with the forward block unless it is NULL.
 816	 */
 817	if (count == 0) {
 818		/*
 819		 * Make altpath point to the block we want to keep and
 820		 * path point to the block we want to drop (this one).
 821		 */
 822		forward = (info->forw != 0);
 823		memcpy(&state->altpath, &state->path, sizeof(state->path));
 824		error = xfs_da_path_shift(state, &state->altpath, forward,
 825						 0, &retval);
 826		if (error)
 827			return(error);
 828		if (retval) {
 829			*action = 0;
 830		} else {
 831			*action = 2;
 832		}
 833		return(0);
 834	}
 835
 836	/*
 837	 * Examine each sibling block to see if we can coalesce with
 838	 * at least 25% free space to spare.  We need to figure out
 839	 * whether to merge with the forward or the backward block.
 840	 * We prefer coalescing with the lower numbered sibling so as
 841	 * to shrink a directory over time.
 842	 */
 843	/* start with smaller blk num */
 844	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
 845	for (i = 0; i < 2; forward = !forward, i++) {
 846		if (forward)
 847			blkno = be32_to_cpu(info->forw);
 848		else
 849			blkno = be32_to_cpu(info->back);
 850		if (blkno == 0)
 851			continue;
 852		error = xfs_da_read_buf(state->args->trans, state->args->dp,
 853					blkno, -1, &bp, state->args->whichfork);
 854		if (error)
 855			return(error);
 856		ASSERT(bp != NULL);
 857
 858		node = (xfs_da_intnode_t *)info;
 859		count  = state->node_ents;
 860		count -= state->node_ents >> 2;
 861		count -= be16_to_cpu(node->hdr.count);
 862		node = bp->data;
 863		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 864		count -= be16_to_cpu(node->hdr.count);
 865		xfs_da_brelse(state->args->trans, bp);
 866		if (count >= 0)
 867			break;	/* fits with at least 25% to spare */
 868	}
 869	if (i >= 2) {
 870		*action = 0;
 871		return(0);
 872	}
 873
 874	/*
 875	 * Make altpath point to the block we want to keep (the lower
 876	 * numbered block) and path point to the block we want to drop.
 877	 */
 878	memcpy(&state->altpath, &state->path, sizeof(state->path));
 879	if (blkno < blk->blkno) {
 880		error = xfs_da_path_shift(state, &state->altpath, forward,
 881						 0, &retval);
 882		if (error) {
 883			return(error);
 884		}
 885		if (retval) {
 886			*action = 0;
 887			return(0);
 888		}
 889	} else {
 890		error = xfs_da_path_shift(state, &state->path, forward,
 891						 0, &retval);
 892		if (error) {
 893			return(error);
 894		}
 895		if (retval) {
 896			*action = 0;
 897			return(0);
 898		}
 899	}
 900	*action = 1;
 901	return(0);
 902}
 903
 904/*
 905 * Walk back up the tree adjusting hash values as necessary,
 906 * when we stop making changes, return.
 907 */
 908void
 909xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
 910{
 911	xfs_da_state_blk_t *blk;
 912	xfs_da_intnode_t *node;
 913	xfs_da_node_entry_t *btree;
 914	xfs_dahash_t lasthash=0;
 915	int level, count;
 916
 917	level = path->active-1;
 918	blk = &path->blk[ level ];
 919	switch (blk->magic) {
 920	case XFS_ATTR_LEAF_MAGIC:
 921		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
 922		if (count == 0)
 923			return;
 924		break;
 925	case XFS_DIR2_LEAFN_MAGIC:
 926		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
 927		if (count == 0)
 928			return;
 929		break;
 930	case XFS_DA_NODE_MAGIC:
 931		lasthash = xfs_da_node_lasthash(blk->bp, &count);
 932		if (count == 0)
 933			return;
 934		break;
 935	}
 936	for (blk--, level--; level >= 0; blk--, level--) {
 937		node = blk->bp->data;
 938		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 939		btree = &node->btree[ blk->index ];
 940		if (be32_to_cpu(btree->hashval) == lasthash)
 941			break;
 942		blk->hashval = lasthash;
 943		btree->hashval = cpu_to_be32(lasthash);
 944		xfs_da_log_buf(state->args->trans, blk->bp,
 945				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
 946
 947		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
 948	}
 949}
 950
 951/*
 952 * Remove an entry from an intermediate node.
 953 */
 954STATIC void
 955xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
 956{
 957	xfs_da_intnode_t *node;
 958	xfs_da_node_entry_t *btree;
 959	int tmp;
 960
 961	trace_xfs_da_node_remove(state->args);
 962
 963	node = drop_blk->bp->data;
 964	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
 965	ASSERT(drop_blk->index >= 0);
 966
 967	/*
 968	 * Copy over the offending entry, or just zero it out.
 969	 */
 970	btree = &node->btree[drop_blk->index];
 971	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
 972		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
 973		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 974		memmove(btree, btree + 1, tmp);
 975		xfs_da_log_buf(state->args->trans, drop_blk->bp,
 976		    XFS_DA_LOGRANGE(node, btree, tmp));
 977		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
 978	}
 979	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
 980	xfs_da_log_buf(state->args->trans, drop_blk->bp,
 981	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
 982	be16_add_cpu(&node->hdr.count, -1);
 983	xfs_da_log_buf(state->args->trans, drop_blk->bp,
 984	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 985
 986	/*
 987	 * Copy the last hash value from the block to propagate upwards.
 988	 */
 989	btree--;
 990	drop_blk->hashval = be32_to_cpu(btree->hashval);
 991}
 992
 993/*
 994 * Unbalance the btree elements between two intermediate nodes,
 995 * move all Btree elements from one node into another.
 996 */
 997STATIC void
 998xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 999				     xfs_da_state_blk_t *save_blk)
1000{
1001	xfs_da_intnode_t *drop_node, *save_node;
1002	xfs_da_node_entry_t *btree;
1003	int tmp;
1004	xfs_trans_t *tp;
1005
1006	trace_xfs_da_node_unbalance(state->args);
1007
1008	drop_node = drop_blk->bp->data;
1009	save_node = save_blk->bp->data;
1010	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1011	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1012	tp = state->args->trans;
1013
1014	/*
1015	 * If the dying block has lower hashvals, then move all the
1016	 * elements in the remaining block up to make a hole.
1017	 */
1018	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
1019	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
1020	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
1021	{
1022		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1023		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1024		memmove(btree, &save_node->btree[0], tmp);
1025		btree = &save_node->btree[0];
1026		xfs_da_log_buf(tp, save_blk->bp,
1027			XFS_DA_LOGRANGE(save_node, btree,
1028				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1029				sizeof(xfs_da_node_entry_t)));
1030	} else {
1031		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1032		xfs_da_log_buf(tp, save_blk->bp,
1033			XFS_DA_LOGRANGE(save_node, btree,
1034				be16_to_cpu(drop_node->hdr.count) *
1035				sizeof(xfs_da_node_entry_t)));
1036	}
1037
1038	/*
1039	 * Move all the B-tree elements from drop_blk to save_blk.
1040	 */
1041	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1042	memcpy(btree, &drop_node->btree[0], tmp);
1043	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1044
1045	xfs_da_log_buf(tp, save_blk->bp,
1046		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1047			sizeof(save_node->hdr)));
1048
1049	/*
1050	 * Save the last hashval in the remaining block for upward propagation.
1051	 */
1052	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1053}
1054
1055/*========================================================================
1056 * Routines used for finding things in the Btree.
1057 *========================================================================*/
1058
1059/*
1060 * Walk down the Btree looking for a particular filename, filling
1061 * in the state structure as we go.
1062 *
1063 * We will set the state structure to point to each of the elements
1064 * in each of the nodes where either the hashval is or should be.
1065 *
1066 * We support duplicate hashval's so for each entry in the current
1067 * node that could contain the desired hashval, descend.  This is a
1068 * pruned depth-first tree search.
1069 */
1070int							/* error */
1071xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1072{
1073	xfs_da_state_blk_t *blk;
1074	xfs_da_blkinfo_t *curr;
1075	xfs_da_intnode_t *node;
1076	xfs_da_node_entry_t *btree;
1077	xfs_dablk_t blkno;
1078	int probe, span, max, error, retval;
1079	xfs_dahash_t hashval, btreehashval;
1080	xfs_da_args_t *args;
1081
1082	args = state->args;
1083
1084	/*
1085	 * Descend thru the B-tree searching each level for the right
1086	 * node to use, until the right hashval is found.
1087	 */
1088	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1089	for (blk = &state->path.blk[0], state->path.active = 1;
1090			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1091			 blk++, state->path.active++) {
1092		/*
1093		 * Read the next node down in the tree.
1094		 */
1095		blk->blkno = blkno;
1096		error = xfs_da_read_buf(args->trans, args->dp, blkno,
1097					-1, &blk->bp, args->whichfork);
1098		if (error) {
1099			blk->blkno = 0;
1100			state->path.active--;
1101			return(error);
1102		}
1103		curr = blk->bp->data;
1104		blk->magic = be16_to_cpu(curr->magic);
1105		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1106		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1107		       blk->magic == XFS_ATTR_LEAF_MAGIC);
1108
1109		/*
1110		 * Search an intermediate node for a match.
1111		 */
1112		if (blk->magic == XFS_DA_NODE_MAGIC) {
1113			node = blk->bp->data;
1114			max = be16_to_cpu(node->hdr.count);
1115			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1116
1117			/*
1118			 * Binary search.  (note: small blocks will skip loop)
1119			 */
1120			probe = span = max / 2;
1121			hashval = args->hashval;
1122			for (btree = &node->btree[probe]; span > 4;
1123				   btree = &node->btree[probe]) {
1124				span /= 2;
1125				btreehashval = be32_to_cpu(btree->hashval);
1126				if (btreehashval < hashval)
1127					probe += span;
1128				else if (btreehashval > hashval)
1129					probe -= span;
1130				else
1131					break;
1132			}
1133			ASSERT((probe >= 0) && (probe < max));
1134			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1135
1136			/*
1137			 * Since we may have duplicate hashval's, find the first
1138			 * matching hashval in the node.
1139			 */
1140			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1141				btree--;
1142				probe--;
1143			}
1144			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1145				btree++;
1146				probe++;
1147			}
1148
1149			/*
1150			 * Pick the right block to descend on.
1151			 */
1152			if (probe == max) {
1153				blk->index = max-1;
1154				blkno = be32_to_cpu(node->btree[max-1].before);
1155			} else {
1156				blk->index = probe;
1157				blkno = be32_to_cpu(btree->before);
1158			}
1159		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1160			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1161			break;
1162		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1163			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1164			break;
1165		}
1166	}
1167
1168	/*
1169	 * A leaf block that ends in the hashval that we are interested in
1170	 * (final hashval == search hashval) means that the next block may
1171	 * contain more entries with the same hashval, shift upward to the
1172	 * next leaf and keep searching.
1173	 */
1174	for (;;) {
1175		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1176			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1177							&blk->index, state);
1178		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1179			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1180			blk->index = args->index;
1181			args->blkno = blk->blkno;
1182		} else {
1183			ASSERT(0);
1184			return XFS_ERROR(EFSCORRUPTED);
1185		}
1186		if (((retval == ENOENT) || (retval == ENOATTR)) &&
1187		    (blk->hashval == args->hashval)) {
1188			error = xfs_da_path_shift(state, &state->path, 1, 1,
1189							 &retval);
1190			if (error)
1191				return(error);
1192			if (retval == 0) {
1193				continue;
1194			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1195				/* path_shift() gives ENOENT */
1196				retval = XFS_ERROR(ENOATTR);
1197			}
1198		}
1199		break;
1200	}
1201	*result = retval;
1202	return(0);
1203}
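/*
 * The intermediate-node search above is a binary search that narrows the
 * span to at most four entries and then scans linearly, so that with
 * duplicate hashvals "probe" ends up at the first entry whose hashval is
 * >= the search value.  The leaf-level loop then handles duplicates that
 * spill across sibling leaves by shifting the path forward one block.
 */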
1204
1205/*========================================================================
1206 * Utility routines.
1207 *========================================================================*/
1208
1209/*
1210 * Link a new block into a doubly linked list of blocks (of whatever type).
1211 */
1212int							/* error */
1213xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1214			       xfs_da_state_blk_t *new_blk)
1215{
1216	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1217	xfs_da_args_t *args;
1218	int before=0, error;
1219	xfs_dabuf_t *bp;
1220
1221	/*
1222	 * Set up environment.
1223	 */
1224	args = state->args;
1225	ASSERT(args != NULL);
1226	old_info = old_blk->bp->data;
1227	new_info = new_blk->bp->data;
1228	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1229	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1230	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1231	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1232	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1233	ASSERT(old_blk->magic == new_blk->magic);
1234
1235	switch (old_blk->magic) {
1236	case XFS_ATTR_LEAF_MAGIC:
1237		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1238		break;
1239	case XFS_DIR2_LEAFN_MAGIC:
1240		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1241		break;
1242	case XFS_DA_NODE_MAGIC:
1243		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
1244		break;
1245	}
1246
1247	/*
1248	 * Link blocks in appropriate order.
1249	 */
1250	if (before) {
1251		/*
1252		 * Link new block in before existing block.
1253		 */
1254		trace_xfs_da_link_before(args);
1255		new_info->forw = cpu_to_be32(old_blk->blkno);
1256		new_info->back = old_info->back;
1257		if (old_info->back) {
1258			error = xfs_da_read_buf(args->trans, args->dp,
1259						be32_to_cpu(old_info->back),
1260						-1, &bp, args->whichfork);
1261			if (error)
1262				return(error);
1263			ASSERT(bp != NULL);
1264			tmp_info = bp->data;
1265			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1266			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1267			tmp_info->forw = cpu_to_be32(new_blk->blkno);
1268			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1269			xfs_da_buf_done(bp);
1270		}
1271		old_info->back = cpu_to_be32(new_blk->blkno);
1272	} else {
1273		/*
1274		 * Link new block in after existing block.
1275		 */
1276		trace_xfs_da_link_after(args);
1277		new_info->forw = old_info->forw;
1278		new_info->back = cpu_to_be32(old_blk->blkno);
1279		if (old_info->forw) {
1280			error = xfs_da_read_buf(args->trans, args->dp,
1281						be32_to_cpu(old_info->forw),
1282						-1, &bp, args->whichfork);
1283			if (error)
1284				return(error);
1285			ASSERT(bp != NULL);
1286			tmp_info = bp->data;
1287			ASSERT(tmp_info->magic == old_info->magic);
1288			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1289			tmp_info->back = cpu_to_be32(new_blk->blkno);
1290			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1291			xfs_da_buf_done(bp);
1292		}
1293		old_info->forw = cpu_to_be32(new_blk->blkno);
1294	}
1295
1296	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1297	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1298	return(0);
1299}
1300
1301/*
1302 * Compare two intermediate nodes for "order".
1303 */
1304STATIC int
1305xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1306{
1307	xfs_da_intnode_t *node1, *node2;
1308
1309	node1 = node1_bp->data;
1310	node2 = node2_bp->data;
1311	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1312	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1313	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1314	    ((be32_to_cpu(node2->btree[0].hashval) <
1315	      be32_to_cpu(node1->btree[0].hashval)) ||
1316	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1317	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1318		return(1);
1319	}
1320	return(0);
1321}
1322
1323/*
1324 * Pick up the last hashvalue from an intermediate node.
1325 */
1326STATIC uint
1327xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
1328{
1329	xfs_da_intnode_t *node;
1330
1331	node = bp->data;
1332	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1333	if (count)
1334		*count = be16_to_cpu(node->hdr.count);
1335	if (!node->hdr.count)
1336		return(0);
1337	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1338}
1339
1340/*
1341 * Unlink a block from a doubly linked list of blocks.
1342 */
1343STATIC int						/* error */
1344xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1345				 xfs_da_state_blk_t *save_blk)
1346{
1347	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1348	xfs_da_args_t *args;
1349	xfs_dabuf_t *bp;
1350	int error;
1351
1352	/*
1353	 * Set up environment.
1354	 */
1355	args = state->args;
1356	ASSERT(args != NULL);
1357	save_info = save_blk->bp->data;
1358	drop_info = drop_blk->bp->data;
1359	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1360	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1361	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1362	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1363	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1364	ASSERT(save_blk->magic == drop_blk->magic);
1365	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1366	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
1367	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1368	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
1369
1370	/*
1371	 * Unlink the leaf block from the doubly linked chain of leaves.
1372	 */
1373	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1374		trace_xfs_da_unlink_back(args);
1375		save_info->back = drop_info->back;
1376		if (drop_info->back) {
1377			error = xfs_da_read_buf(args->trans, args->dp,
1378						be32_to_cpu(drop_info->back),
1379						-1, &bp, args->whichfork);
1380			if (error)
1381				return(error);
1382			ASSERT(bp != NULL);
1383			tmp_info = bp->data;
1384			ASSERT(tmp_info->magic == save_info->magic);
1385			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1386			tmp_info->forw = cpu_to_be32(save_blk->blkno);
1387			xfs_da_log_buf(args->trans, bp, 0,
1388						    sizeof(*tmp_info) - 1);
1389			xfs_da_buf_done(bp);
1390		}
1391	} else {
1392		trace_xfs_da_unlink_forward(args);
1393		save_info->forw = drop_info->forw;
1394		if (drop_info->forw) {
1395			error = xfs_da_read_buf(args->trans, args->dp,
1396						be32_to_cpu(drop_info->forw),
1397						-1, &bp, args->whichfork);
1398			if (error)
1399				return(error);
1400			ASSERT(bp != NULL);
1401			tmp_info = bp->data;
1402			ASSERT(tmp_info->magic == save_info->magic);
1403			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1404			tmp_info->back = cpu_to_be32(save_blk->blkno);
1405			xfs_da_log_buf(args->trans, bp, 0,
1406						    sizeof(*tmp_info) - 1);
1407			xfs_da_buf_done(bp);
1408		}
1409	}
1410
1411	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1412	return(0);
1413}
1414
1415/*
1416 * Move a path "forward" or "!forward" one block at the current level.
1417 *
1418 * This routine will adjust a "path" to point to the next block
1419 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1420 * Btree, including updating pointers to the intermediate nodes between
1421 * the new bottom and the root.
1422 */
1423int							/* error */
1424xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1425				 int forward, int release, int *result)
1426{
1427	xfs_da_state_blk_t *blk;
1428	xfs_da_blkinfo_t *info;
1429	xfs_da_intnode_t *node;
1430	xfs_da_args_t *args;
1431	xfs_dablk_t blkno=0;
1432	int level, error;
1433
1434	/*
1435	 * Roll up the Btree looking for the first block where our
1436	 * current index is not at the edge of the block.  Note that
1437	 * we skip the bottom layer because we want the sibling block.
1438	 */
1439	args = state->args;
1440	ASSERT(args != NULL);
1441	ASSERT(path != NULL);
1442	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1443	level = (path->active-1) - 1;	/* skip bottom layer in path */
1444	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1445		ASSERT(blk->bp != NULL);
1446		node = blk->bp->data;
1447		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1448		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1449			blk->index++;
1450			blkno = be32_to_cpu(node->btree[blk->index].before);
1451			break;
1452		} else if (!forward && (blk->index > 0)) {
1453			blk->index--;
1454			blkno = be32_to_cpu(node->btree[blk->index].before);
1455			break;
1456		}
1457	}
1458	if (level < 0) {
1459		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
1460		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1461		return(0);
1462	}
1463
1464	/*
1465	 * Roll down the edge of the subtree until we reach the
1466	 * same depth we were at originally.
1467	 */
1468	for (blk++, level++; level < path->active; blk++, level++) {
1469		/*
1470		 * Release the old block.
1471		 * (if it's dirty, trans won't actually let go)
1472		 */
1473		if (release)
1474			xfs_da_brelse(args->trans, blk->bp);
1475
1476		/*
1477		 * Read the next child block.
1478		 */
1479		blk->blkno = blkno;
1480		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
1481						     &blk->bp, args->whichfork);
1482		if (error)
1483			return(error);
1484		ASSERT(blk->bp != NULL);
1485		info = blk->bp->data;
1486		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1487		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1488		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1489		blk->magic = be16_to_cpu(info->magic);
1490		if (blk->magic == XFS_DA_NODE_MAGIC) {
1491			node = (xfs_da_intnode_t *)info;
1492			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1493			if (forward)
1494				blk->index = 0;
1495			else
1496				blk->index = be16_to_cpu(node->hdr.count)-1;
1497			blkno = be32_to_cpu(node->btree[blk->index].before);
1498		} else {
1499			ASSERT(level == path->active-1);
1500			blk->index = 0;
1501			switch(blk->magic) {
1502			case XFS_ATTR_LEAF_MAGIC:
1503				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1504								      NULL);
1505				break;
1506			case XFS_DIR2_LEAFN_MAGIC:
1507				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1508								       NULL);
1509				break;
1510			default:
1511				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
1512				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
1513				break;
1514			}
1515		}
1516	}
1517	*result = 0;
1518	return(0);
1519}
1520
1521
1522/*========================================================================
1523 * Utility routines.
1524 *========================================================================*/
1525
1526/*
1527 * Implement a simple hash on a character string.
1528 * Rotate the hash value by 7 bits, then XOR each character in.
1529 * This is implemented with some source-level loop unrolling.
1530 */
1531xfs_dahash_t
1532xfs_da_hashname(const __uint8_t *name, int namelen)
1533{
1534	xfs_dahash_t hash;
1535
1536	/*
1537	 * Do four characters at a time as long as we can.
1538	 */
1539	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1540		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1541		       (name[3] << 0) ^ rol32(hash, 7 * 4);
1542
1543	/*
1544	 * Now do the rest of the characters.
1545	 */
1546	switch (namelen) {
1547	case 3:
1548		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1549		       rol32(hash, 7 * 3);
1550	case 2:
1551		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1552	case 1:
1553		return (name[0] << 0) ^ rol32(hash, 7 * 1);
1554	default: /* case 0: */
1555		return hash;
1556	}
1557}
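/*
 * Example: for a seven character name c0..c6 the loop above folds the
 * first four bytes in one step,
 *	hash = (c0 << 21) ^ (c1 << 14) ^ (c2 << 7) ^ c3
 * (rol32(0, 28) is 0), and the switch then mixes in the remaining three,
 *	hash = (c4 << 14) ^ (c5 << 7) ^ c6 ^ rol32(hash, 21);
 * so every byte of the name influences the final 32-bit value.
 */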
1558
1559enum xfs_dacmp
1560xfs_da_compname(
1561	struct xfs_da_args *args,
1562	const unsigned char *name,
1563	int		len)
1564{
1565	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1566					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1567}
1568
1569static xfs_dahash_t
1570xfs_default_hashname(
1571	struct xfs_name	*name)
1572{
1573	return xfs_da_hashname(name->name, name->len);
1574}
1575
1576const struct xfs_nameops xfs_default_nameops = {
1577	.hashname	= xfs_default_hashname,
1578	.compname	= xfs_da_compname
1579};
1580
1581int
1582xfs_da_grow_inode_int(
1583	struct xfs_da_args	*args,
1584	xfs_fileoff_t		*bno,
1585	int			count)
1586{
1587	struct xfs_trans	*tp = args->trans;
1588	struct xfs_inode	*dp = args->dp;
1589	int			w = args->whichfork;
1590	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
1591	struct xfs_bmbt_irec	map, *mapp;
1592	int			nmap, error, got, i, mapi;
1593
1594	/*
1595	 * Find a spot in the file space to put the new block.
1596	 */
1597	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
1598	if (error)
1599		return error;
1600
1601	/*
1602	 * Try mapping it in one filesystem block.
1603	 */
1604	nmap = 1;
1605	ASSERT(args->firstblock != NULL);
1606	error = xfs_bmapi_write(tp, dp, *bno, count,
1607			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
1608			args->firstblock, args->total, &map, &nmap,
1609			args->flist);
1610	if (error)
1611		return error;
1612
1613	ASSERT(nmap <= 1);
1614	if (nmap == 1) {
1615		mapp = &map;
1616		mapi = 1;
1617	} else if (nmap == 0 && count > 1) {
1618		xfs_fileoff_t		b;
1619		int			c;
1620
1621		/*
1622		 * If we didn't get it and the block might work if fragmented,
1623		 * try without the CONTIG flag.  Loop until we get it all.
1624		 */
1625		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
1626		for (b = *bno, mapi = 0; b < *bno + count; ) {
1627			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
1628			c = (int)(*bno + count - b);
1629			error = xfs_bmapi_write(tp, dp, b, c,
1630					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1631					args->firstblock, args->total,
1632					&mapp[mapi], &nmap, args->flist);
1633			if (error)
1634				goto out_free_map;
1635			if (nmap < 1)
1636				break;
1637			mapi += nmap;
1638			b = mapp[mapi - 1].br_startoff +
1639			    mapp[mapi - 1].br_blockcount;
1640		}
1641	} else {
1642		mapi = 0;
1643		mapp = NULL;
1644	}
1645
1646	/*
1647	 * Count the blocks we got, make sure it matches the total.
1648	 */
1649	for (i = 0, got = 0; i < mapi; i++)
1650		got += mapp[i].br_blockcount;
1651	if (got != count || mapp[0].br_startoff != *bno ||
1652	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
1653	    *bno + count) {
1654		error = XFS_ERROR(ENOSPC);
1655		goto out_free_map;
1656	}
1657
1658	/* account for newly allocated blocks in reserved blocks total */
1659	args->total -= dp->i_d.di_nblocks - nblks;
1660
1661out_free_map:
1662	if (mapp != &map)
1663		kmem_free(mapp);
1664	return error;
1665}
1666
1667/*
1668 * Add a block to the btree ahead of the file.
1669 * Return the new block number to the caller.
1670 */
1671int
1672xfs_da_grow_inode(
1673	struct xfs_da_args	*args,
1674	xfs_dablk_t		*new_blkno)
1675{
1676	xfs_fileoff_t		bno;
1677	int			count;
1678	int			error;
1679
1680	trace_xfs_da_grow_inode(args);
1681
1682	if (args->whichfork == XFS_DATA_FORK) {
1683		bno = args->dp->i_mount->m_dirleafblk;
1684		count = args->dp->i_mount->m_dirblkfsbs;
1685	} else {
1686		bno = 0;
1687		count = 1;
1688	}
1689
1690	error = xfs_da_grow_inode_int(args, &bno, count);
1691	if (!error)
1692		*new_blkno = (xfs_dablk_t)bno;
1693	return error;
1694}
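/*
 * For the attribute fork a dabtree block is a single filesystem block;
 * directory blocks may span m_dirblkfsbs filesystem blocks, which is why
 * xfs_da_grow_inode_int() first tries one contiguous mapping and only
 * falls back to piecewise allocation when that fails.
 */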
1695
1696/*
1697 * Ick.  We need to always be able to remove a btree block, even
1698 * if there's no space reservation because the filesystem is full.
1699 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1700 * It swaps the target block with the last block in the file.  The
1701 * last block in the file can always be removed, since removing it
1702 * cannot cause a bmap btree split.
1703 */
1704STATIC int
1705xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1706		      xfs_dabuf_t **dead_bufp)
1707{
1708	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1709	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
1710	xfs_fileoff_t lastoff;
1711	xfs_inode_t *ip;
1712	xfs_trans_t *tp;
1713	xfs_mount_t *mp;
1714	int error, w, entno, level, dead_level;
1715	xfs_da_blkinfo_t *dead_info, *sib_info;
1716	xfs_da_intnode_t *par_node, *dead_node;
1717	xfs_dir2_leaf_t *dead_leaf2;
1718	xfs_dahash_t dead_hash;
1719
1720	trace_xfs_da_swap_lastblock(args);
1721
1722	dead_buf = *dead_bufp;
1723	dead_blkno = *dead_blknop;
1724	tp = args->trans;
1725	ip = args->dp;
1726	w = args->whichfork;
1727	ASSERT(w == XFS_DATA_FORK);
1728	mp = ip->i_mount;
1729	lastoff = mp->m_dirfreeblk;
1730	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1731	if (error)
1732		return error;
1733	if (unlikely(lastoff == 0)) {
1734		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1735				 mp);
1736		return XFS_ERROR(EFSCORRUPTED);
1737	}
1738	/*
1739	 * Read the last block in the btree space.
1740	 */
1741	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1742	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
1743		return error;
1744	/*
1745	 * Copy the last block into the dead buffer and log it.
1746	 */
1747	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
1748	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1749	dead_info = dead_buf->data;
1750	/*
1751	 * Get values from the moved block.
1752	 */
1753	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
1754		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1755		dead_level = 0;
1756		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1757	} else {
1758		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1759		dead_node = (xfs_da_intnode_t *)dead_info;
1760		dead_level = be16_to_cpu(dead_node->hdr.level);
1761		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1762	}
1763	sib_buf = par_buf = NULL;
1764	/*
1765	 * If the moved block has a left sibling, fix up the pointers.
1766	 */
1767	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1768		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1769			goto done;
1770		sib_info = sib_buf->data;
1771		if (unlikely(
1772		    be32_to_cpu(sib_info->forw) != last_blkno ||
1773		    sib_info->magic != dead_info->magic)) {
1774			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1775					 XFS_ERRLEVEL_LOW, mp);
1776			error = XFS_ERROR(EFSCORRUPTED);
1777			goto done;
1778		}
1779		sib_info->forw = cpu_to_be32(dead_blkno);
1780		xfs_da_log_buf(tp, sib_buf,
1781			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1782					sizeof(sib_info->forw)));
1783		xfs_da_buf_done(sib_buf);
1784		sib_buf = NULL;
1785	}
1786	/*
1787	 * If the moved block has a right sibling, fix up the pointers.
1788	 */
1789	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1790		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1791			goto done;
1792		sib_info = sib_buf->data;
1793		if (unlikely(
1794		       be32_to_cpu(sib_info->back) != last_blkno ||
1795		       sib_info->magic != dead_info->magic)) {
1796			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1797					 XFS_ERRLEVEL_LOW, mp);
1798			error = XFS_ERROR(EFSCORRUPTED);
1799			goto done;
1800		}
1801		sib_info->back = cpu_to_be32(dead_blkno);
1802		xfs_da_log_buf(tp, sib_buf,
1803			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1804					sizeof(sib_info->back)));
1805		xfs_da_buf_done(sib_buf);
1806		sib_buf = NULL;
1807	}
1808	par_blkno = mp->m_dirleafblk;
1809	level = -1;
1810	/*
1811	 * Walk down the tree looking for the parent of the moved block.
1812	 */
1813	for (;;) {
1814		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1815			goto done;
1816		par_node = par_buf->data;
1817		if (unlikely(par_node->hdr.info.magic !=
1818		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1819		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1820			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1821					 XFS_ERRLEVEL_LOW, mp);
1822			error = XFS_ERROR(EFSCORRUPTED);
1823			goto done;
1824		}
1825		level = be16_to_cpu(par_node->hdr.level);
1826		for (entno = 0;
1827		     entno < be16_to_cpu(par_node->hdr.count) &&
1828		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1829		     entno++)
1830			continue;
1831		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1832			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1833					 XFS_ERRLEVEL_LOW, mp);
1834			error = XFS_ERROR(EFSCORRUPTED);
1835			goto done;
1836		}
1837		par_blkno = be32_to_cpu(par_node->btree[entno].before);
1838		if (level == dead_level + 1)
1839			break;
1840		xfs_da_brelse(tp, par_buf);
1841		par_buf = NULL;
1842	}
1843	/*
1844	 * We're in the right parent block.
1845	 * Look for the right entry.
1846	 */
1847	for (;;) {
1848		for (;
1849		     entno < be16_to_cpu(par_node->hdr.count) &&
1850		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1851		     entno++)
1852			continue;
1853		if (entno < be16_to_cpu(par_node->hdr.count))
1854			break;
1855		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1856		xfs_da_brelse(tp, par_buf);
1857		par_buf = NULL;
1858		if (unlikely(par_blkno == 0)) {
1859			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1860					 XFS_ERRLEVEL_LOW, mp);
1861			error = XFS_ERROR(EFSCORRUPTED);
1862			goto done;
1863		}
1864		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1865			goto done;
1866		par_node = par_buf->data;
1867		if (unlikely(
1868		    be16_to_cpu(par_node->hdr.level) != level ||
1869		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
1870			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1871					 XFS_ERRLEVEL_LOW, mp);
1872			error = XFS_ERROR(EFSCORRUPTED);
1873			goto done;
1874		}
1875		entno = 0;
1876	}
1877	/*
1878	 * Update the parent entry pointing to the moved block.
1879	 */
1880	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1881	xfs_da_log_buf(tp, par_buf,
1882		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1883				sizeof(par_node->btree[entno].before)));
1884	xfs_da_buf_done(par_buf);
1885	xfs_da_buf_done(dead_buf);
1886	*dead_blknop = last_blkno;
1887	*dead_bufp = last_buf;
1888	return 0;
1889done:
1890	if (par_buf)
1891		xfs_da_brelse(tp, par_buf);
1892	if (sib_buf)
1893		xfs_da_brelse(tp, sib_buf);
1894	xfs_da_brelse(tp, last_buf);
1895	return error;
1896}
1897
1898/*
1899 * Remove a btree block from a directory or attribute.
1900 */
1901int
1902xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1903		    xfs_dabuf_t *dead_buf)
1904{
1905	xfs_inode_t *dp;
1906	int done, error, w, count;
1907	xfs_trans_t *tp;
1908	xfs_mount_t *mp;
1909
1910	trace_xfs_da_shrink_inode(args);
1911
1912	dp = args->dp;
1913	w = args->whichfork;
1914	tp = args->trans;
1915	mp = dp->i_mount;
1916	if (w == XFS_DATA_FORK)
1917		count = mp->m_dirblkfsbs;
1918	else
1919		count = 1;
1920	for (;;) {
1921		/*
1922		 * Remove extents.  If we get ENOSPC for a dir we have to move
1923		 * the last block to the place we want to kill.
1924		 */
1925		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
1926				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1927				0, args->firstblock, args->flist,
1928				&done)) == ENOSPC) {
1929			if (w != XFS_DATA_FORK)
1930				break;
1931			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
1932					&dead_buf)))
1933				break;
1934		} else {
1935			break;
1936		}
1937	}
1938	xfs_da_binval(tp, dead_buf);
1939	return error;
1940}
1941
1942/*
1943 * See if the mapping(s) for this btree block are valid, i.e.
1944 * don't contain holes, are logically contiguous, and cover the whole range.
1945 */
1946STATIC int
1947xfs_da_map_covers_blocks(
1948	int		nmap,
1949	xfs_bmbt_irec_t	*mapp,
1950	xfs_dablk_t	bno,
1951	int		count)
1952{
1953	int		i;
1954	xfs_fileoff_t	off;
1955
1956	for (i = 0, off = bno; i < nmap; i++) {
1957		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
1958		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
1959			return 0;
1960		}
1961		if (off != mapp[i].br_startoff) {
1962			return 0;
1963		}
1964		off += mapp[i].br_blockcount;
1965	}
1966	return off == bno + count;
1967}
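
/*
 * For illustration (hypothetical mapping): with bno = 8 and count = 4, the
 * extents { off 8, len 2 } + { off 10, len 2 } walk off up to 12, which
 * equals bno + count, so the function returns 1.  By contrast,
 * { off 8, len 2 } + { off 11, len 1 } fails the off != br_startoff check
 * at the second extent (a hole at block 10) and the function returns 0,
 * as it would for any HOLESTARTBLOCK or DELAYSTARTBLOCK entry.
 */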
1968
1969/*
1970 * Make a dabuf.
1971 * Used for get_buf, read_buf, read_bufr, and reada_buf.
1972 */
1973STATIC int
1974xfs_da_do_buf(
1975	xfs_trans_t	*trans,
1976	xfs_inode_t	*dp,
1977	xfs_dablk_t	bno,
1978	xfs_daddr_t	*mappedbnop,
1979	xfs_dabuf_t	**bpp,
1980	int		whichfork,
1981	int		caller)
1982{
1983	xfs_buf_t	*bp = NULL;
1984	xfs_buf_t	**bplist;
1985	int		error=0;
1986	int		i;
1987	xfs_bmbt_irec_t	map;
1988	xfs_bmbt_irec_t	*mapp;
1989	xfs_daddr_t	mappedbno;
1990	xfs_mount_t	*mp;
1991	int		nbplist=0;
1992	int		nfsb;
1993	int		nmap;
1994	xfs_dabuf_t	*rbp;
1995
1996	mp = dp->i_mount;
1997	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1998	mappedbno = *mappedbnop;
1999	/*
2000	 * Caller doesn't have a mapping.  -2 means don't complain
2001	 * if we land in a hole.
2002	 */
2003	if (mappedbno == -1 || mappedbno == -2) {
2004		/*
2005		 * Optimize the one-block case.
2006		 */
2007		if (nfsb == 1)
2008			mapp = &map;
2009		else
2010			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
2011
2012		nmap = nfsb;
2013		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
2014				       &nmap, xfs_bmapi_aflag(whichfork));
2015		if (error)
2016			goto exit0;
2017	} else {
2018		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2019		map.br_startoff = (xfs_fileoff_t)bno;
2020		map.br_blockcount = nfsb;
2021		mapp = &map;
2022		nmap = 1;
2023	}
2024	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
2025		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
2026		if (unlikely(error == EFSCORRUPTED)) {
2027			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2028				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2029					__func__, (long long)bno,
2030					(long long)dp->i_ino);
2031				for (i = 0; i < nmap; i++) {
2032					xfs_alert(mp,
2033"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2034						i,
2035						(long long)mapp[i].br_startoff,
2036						(long long)mapp[i].br_startblock,
2037						(long long)mapp[i].br_blockcount,
2038						mapp[i].br_state);
2039				}
2040			}
2041			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2042					 XFS_ERRLEVEL_LOW, mp);
2043		}
2044		goto exit0;
2045	}
2046	if (caller != 3 && nmap > 1) {
2047		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
2048		nbplist = 0;
2049	} else
2050		bplist = NULL;
2051	/*
2052	 * Turn the mapping(s) into buffer(s).
2053	 */
2054	for (i = 0; i < nmap; i++) {
2055		int	nmapped;
2056
2057		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
2058		if (i == 0)
2059			*mappedbnop = mappedbno;
2060		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
2061		switch (caller) {
2062		case 0:
2063			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
2064				mappedbno, nmapped, 0);
2065			error = bp ? bp->b_error : XFS_ERROR(EIO);
2066			break;
2067		case 1:
2068		case 2:
2069			bp = NULL;
2070			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
2071				mappedbno, nmapped, 0, &bp);
2072			break;
2073		case 3:
2074			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
2075			error = 0;
2076			bp = NULL;
2077			break;
2078		}
2079		if (error) {
2080			if (bp)
2081				xfs_trans_brelse(trans, bp);
2082			goto exit1;
2083		}
2084		if (!bp)
2085			continue;
2086		if (caller == 1) {
2087			if (whichfork == XFS_ATTR_FORK)
2088				xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2089			else
2090				xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2091		}
2092		if (bplist) {
2093			bplist[nbplist++] = bp;
2094		}
2095	}
2096	/*
2097	 * Build a dabuf structure.
2098	 */
2099	if (bplist) {
2100		rbp = xfs_da_buf_make(nbplist, bplist);
2101	} else if (bp)
2102		rbp = xfs_da_buf_make(1, &bp);
2103	else
2104		rbp = NULL;
2105	/*
2106	 * For read_buf, check the magic number.
2107	 */
2108	if (caller == 1) {
2109		xfs_dir2_data_hdr_t	*hdr = rbp->data;
2110		xfs_dir2_free_t		*free = rbp->data;
2111		xfs_da_blkinfo_t	*info = rbp->data;
2112		uint			magic, magic1;
2113
2114		magic = be16_to_cpu(info->magic);
2115		magic1 = be32_to_cpu(hdr->magic);
2116		if (unlikely(
2117		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2118				   (magic != XFS_ATTR_LEAF_MAGIC) &&
2119				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
2120				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
2121				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2122				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
2123				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2124				mp, XFS_ERRTAG_DA_READ_BUF,
2125				XFS_RANDOM_DA_READ_BUF))) {
2126			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2127			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2128					     XFS_ERRLEVEL_LOW, mp, info);
2129			error = XFS_ERROR(EFSCORRUPTED);
2130			xfs_da_brelse(trans, rbp);
2131			nbplist = 0;
2132			goto exit1;
2133		}
2134	}
2135	if (bplist) {
2136		kmem_free(bplist);
2137	}
2138	if (mapp != &map) {
2139		kmem_free(mapp);
2140	}
2141	if (bpp)
2142		*bpp = rbp;
2143	return 0;
2144exit1:
2145	if (bplist) {
2146		for (i = 0; i < nbplist; i++)
2147			xfs_trans_brelse(trans, bplist[i]);
2148		kmem_free(bplist);
2149	}
2150exit0:
2151	if (mapp != &map)
2152		kmem_free(mapp);
2153	if (bpp)
2154		*bpp = NULL;
2155	return error;
2156}
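
/*
 * The wrappers below map onto the caller argument of xfs_da_do_buf():
 * xfs_da_get_buf() passes 0 (just get the buffer), xfs_da_read_buf()
 * passes 1 (read the block and check its magic number), and
 * xfs_da_reada_buf() passes 3 (issue readahead only, no dabuf is built).
 */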
2157
2158/*
2159 * Get a buffer for the dir/attr block.
2160 */
2161int
2162xfs_da_get_buf(
2163	xfs_trans_t	*trans,
2164	xfs_inode_t	*dp,
2165	xfs_dablk_t	bno,
2166	xfs_daddr_t		mappedbno,
2167	xfs_dabuf_t	**bpp,
2168	int		whichfork)
2169{
2170	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
2171}
2172
2173/*
2174 * Get a buffer for the dir/attr block, fill in the contents.
2175 */
2176int
2177xfs_da_read_buf(
2178	xfs_trans_t	*trans,
2179	xfs_inode_t	*dp,
2180	xfs_dablk_t	bno,
2181	xfs_daddr_t		mappedbno,
2182	xfs_dabuf_t	**bpp,
2183	int		whichfork)
2184{
2185	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
2186}
2187
2188/*
2189 * Readahead the dir/attr block.
2190 */
2191xfs_daddr_t
2192xfs_da_reada_buf(
2193	xfs_trans_t	*trans,
2194	xfs_inode_t	*dp,
2195	xfs_dablk_t	bno,
2196	int		whichfork)
2197{
2198	xfs_daddr_t		rval;
2199
2200	rval = -1;
2201	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
2202		return -1;
2203	else
2204		return rval;
2205}
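
/*
 * A rough sketch of how the helpers above and below are typically combined
 * (hypothetical caller; tp, dp, blkno and mp are assumed to come from the
 * enclosing transaction context):
 *
 *	xfs_dabuf_t	*bp;
 *	int		error;
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
 *	if (error)
 *		return error;
 *	... modify bp->data ...
 *	xfs_da_log_buf(tp, bp, 0, mp->m_dirblksize - 1);
 *	xfs_da_buf_done(bp);
 *
 * A caller that only examined the block would release it with
 * xfs_da_brelse(tp, bp) instead, as the routines in this file do.
 */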
2206
2207kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
2208kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */
2209
2210/*
2211 * Allocate a dir-state structure.
2212 * We don't put them on the stack since they're large.
2213 */
2214xfs_da_state_t *
2215xfs_da_state_alloc(void)
2216{
2217	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2218}
2219
2220/*
2221 * Kill the altpath contents of a da-state structure.
2222 */
2223STATIC void
2224xfs_da_state_kill_altpath(xfs_da_state_t *state)
2225{
2226	int	i;
2227
2228	for (i = 0; i < state->altpath.active; i++) {
2229		if (state->altpath.blk[i].bp) {
2230			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2231				xfs_da_buf_done(state->altpath.blk[i].bp);
2232			state->altpath.blk[i].bp = NULL;
2233		}
2234	}
2235	state->altpath.active = 0;
2236}
2237
2238/*
2239 * Free a da-state structure.
2240 */
2241void
2242xfs_da_state_free(xfs_da_state_t *state)
2243{
2244	int	i;
2245
2246	xfs_da_state_kill_altpath(state);
2247	for (i = 0; i < state->path.active; i++) {
2248		if (state->path.blk[i].bp)
2249			xfs_da_buf_done(state->path.blk[i].bp);
2250	}
2251	if (state->extravalid && state->extrablk.bp)
2252		xfs_da_buf_done(state->extrablk.bp);
2253#ifdef DEBUG
2254	memset((char *)state, 0, sizeof(*state));
2255#endif /* DEBUG */
2256	kmem_zone_free(xfs_da_state_zone, state);
2257}
2258
2259/*
2260 * Create a dabuf.
2261 */
2262/* ARGSUSED */
2263STATIC xfs_dabuf_t *
2264xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
2265{
2266	xfs_buf_t	*bp;
2267	xfs_dabuf_t	*dabuf;
2268	int		i;
2269	int		off;
2270
2271	if (nbuf == 1)
2272		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2273	else
2274		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2275	dabuf->dirty = 0;
2276	if (nbuf == 1) {
2277		dabuf->nbuf = 1;
2278		bp = bps[0];
2279		dabuf->bbcount = bp->b_length;
2280		dabuf->data = bp->b_addr;
2281		dabuf->bps[0] = bp;
2282	} else {
2283		dabuf->nbuf = nbuf;
2284		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2285			dabuf->bps[i] = bp = bps[i];
2286			dabuf->bbcount += bp->b_length;
2287		}
2288		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2289		for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
2290			bp = bps[i];
2291			memcpy((char *)dabuf->data + off, bp->b_addr,
2292				BBTOB(bp->b_length));
2293		}
2294	}
2295	return dabuf;
2296}
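
/*
 * For example (illustrative sizes): if the two 4k filesystem blocks backing
 * an 8k directory block are not physically contiguous, the dabuf is built
 * from nbuf = 2 buffers of b_length = 8 basic blocks each, so bbcount = 16
 * and dabuf->data is a separate 8192-byte copy treated as one contiguous
 * block.  The single-buffer case skips the copy and points data straight
 * at bp->b_addr.
 */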
2297
2298/*
2299 * Un-dirty a dabuf.
2300 */
2301STATIC void
2302xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2303{
2304	xfs_buf_t	*bp;
2305	int		i;
2306	int		off;
2307
2308	if (dabuf->dirty) {
2309		ASSERT(dabuf->nbuf > 1);
2310		dabuf->dirty = 0;
2311		for (i = off = 0; i < dabuf->nbuf;
2312				i++, off += BBTOB(bp->b_length)) {
2313			bp = dabuf->bps[i];
2314			memcpy(bp->b_addr, dabuf->data + off,
2315						BBTOB(bp->b_length));
2316		}
2317	}
2318}
2319
2320/*
2321 * Release a dabuf.
2322 */
2323void
2324xfs_da_buf_done(xfs_dabuf_t *dabuf)
2325{
2326	ASSERT(dabuf);
2327	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2328	if (dabuf->dirty)
2329		xfs_da_buf_clean(dabuf);
2330	if (dabuf->nbuf > 1) {
2331		kmem_free(dabuf->data);
2332		kmem_free(dabuf);
2333	} else {
2334		kmem_zone_free(xfs_dabuf_zone, dabuf);
2335	}
2336}
2337
2338/*
2339 * Log transaction from a dabuf.
2340 */
2341void
2342xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2343{
2344	xfs_buf_t	*bp;
2345	uint		f;
2346	int		i;
2347	uint		l;
2348	int		off;
2349
2350	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2351	if (dabuf->nbuf == 1) {
2352		ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
2353		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2354		return;
2355	}
2356	dabuf->dirty = 1;
2357	ASSERT(first <= last);
2358	for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
2359		bp = dabuf->bps[i];
2360		f = off;
2361		l = f + BBTOB(bp->b_length) - 1;
2362		if (f < first)
2363			f = first;
2364		if (l > last)
2365			l = last;
2366		if (f <= l)
2367			xfs_trans_log_buf(tp, bp, f - off, l - off);
2368		/*
2369		 * B_DONE is set by xfs_trans_log_buf.
2370		 * If we don't set it on a new buffer (get, not read),
2371		 * then if we don't put anything in the buffer it won't
2372		 * be set, and at commit it is released into the cache,
2373		 * and then a read will fail.
2374		 */
2375		else if (!(XFS_BUF_ISDONE(bp)))
2376		  XFS_BUF_DONE(bp);
2377	}
2378	ASSERT(last < off);
2379}
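
/*
 * Worked example of the range split above (hypothetical sizes): with two
 * 4096-byte buffers, logging first = 4000, last = 4200 visits bps[0] with
 * off = 0, clamps the range to [4000, 4095] and logs it directly, then
 * visits bps[1] with off = 4096, clamps to [4096, 4200] and logs [0, 104]
 * relative to that buffer.  A buffer whose clamped range is empty is only
 * marked done, for the reason given in the comment above.
 */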
2380
2381/*
2382 * Release dabuf from a transaction.
2383 * Have to free up the dabuf before the buffers are released,
2384 * since the synchronization on the dabuf is really the lock on the buffer.
2385 */
2386void
2387xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2388{
2389	xfs_buf_t	*bp;
2390	xfs_buf_t	**bplist;
2391	int		i;
2392	int		nbuf;
2393
2394	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2395	if ((nbuf = dabuf->nbuf) == 1) {
2396		bplist = &bp;
2397		bp = dabuf->bps[0];
2398	} else {
2399		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2400		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2401	}
2402	xfs_da_buf_done(dabuf);
2403	for (i = 0; i < nbuf; i++)
2404		xfs_trans_brelse(tp, bplist[i]);
2405	if (bplist != &bp)
2406		kmem_free(bplist);
2407}
2408
2409/*
2410 * Invalidate dabuf from a transaction.
2411 */
2412void
2413xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2414{
2415	xfs_buf_t	*bp;
2416	xfs_buf_t	**bplist;
2417	int		i;
2418	int		nbuf;
2419
2420	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2421	if ((nbuf = dabuf->nbuf) == 1) {
2422		bplist = &bp;
2423		bp = dabuf->bps[0];
2424	} else {
2425		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2426		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2427	}
2428	xfs_da_buf_done(dabuf);
2429	for (i = 0; i < nbuf; i++)
2430		xfs_trans_binval(tp, bplist[i]);
2431	if (bplist != &bp)
2432		kmem_free(bplist);
2433}
2434
2435/*
2436 * Get the first daddr from a dabuf.
2437 */
2438xfs_daddr_t
2439xfs_da_blkno(xfs_dabuf_t *dabuf)
2440{
2441	ASSERT(dabuf->nbuf);
2442	ASSERT(dabuf->data);
2443	return XFS_BUF_ADDR(dabuf->bps[0]);
2444}
v3.15
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * Copyright (c) 2013 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_bit.h"
  26#include "xfs_sb.h"
  27#include "xfs_ag.h"
  28#include "xfs_mount.h"
  29#include "xfs_da_format.h"
  30#include "xfs_da_btree.h"
  31#include "xfs_dir2.h"
  32#include "xfs_dir2_priv.h"
  33#include "xfs_inode.h"
  34#include "xfs_trans.h"
  35#include "xfs_inode_item.h"
  36#include "xfs_alloc.h"
  37#include "xfs_bmap.h"
  38#include "xfs_attr.h"
  39#include "xfs_attr_leaf.h"
  40#include "xfs_error.h"
  41#include "xfs_trace.h"
  42#include "xfs_cksum.h"
  43#include "xfs_buf_item.h"
  44
  45/*
  46 * xfs_da_btree.c
  47 *
  48 * Routines to implement directories as Btrees of hashed names.
  49 */
  50
  51/*========================================================================
  52 * Function prototypes for the kernel.
  53 *========================================================================*/
  54
  55/*
  56 * Routines used for growing the Btree.
  57 */
  58STATIC int xfs_da3_root_split(xfs_da_state_t *state,
  59					    xfs_da_state_blk_t *existing_root,
  60					    xfs_da_state_blk_t *new_child);
  61STATIC int xfs_da3_node_split(xfs_da_state_t *state,
  62					    xfs_da_state_blk_t *existing_blk,
  63					    xfs_da_state_blk_t *split_blk,
  64					    xfs_da_state_blk_t *blk_to_add,
  65					    int treelevel,
  66					    int *result);
  67STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
  68					 xfs_da_state_blk_t *node_blk_1,
  69					 xfs_da_state_blk_t *node_blk_2);
  70STATIC void xfs_da3_node_add(xfs_da_state_t *state,
  71				   xfs_da_state_blk_t *old_node_blk,
  72				   xfs_da_state_blk_t *new_node_blk);
  73
  74/*
  75 * Routines used for shrinking the Btree.
  76 */
  77STATIC int xfs_da3_root_join(xfs_da_state_t *state,
  78					   xfs_da_state_blk_t *root_blk);
  79STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
  80STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
  81					      xfs_da_state_blk_t *drop_blk);
  82STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
  83					 xfs_da_state_blk_t *src_node_blk,
  84					 xfs_da_state_blk_t *dst_node_blk);
  85
  86/*
  87 * Utility routines.
  88 */
  89STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
  90				  xfs_da_state_blk_t *drop_blk,
  91				  xfs_da_state_blk_t *save_blk);
  92
  93
  94kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
  95
  96/*
  97 * Allocate a dir-state structure.
  98 * We don't put them on the stack since they're large.
  99 */
 100xfs_da_state_t *
 101xfs_da_state_alloc(void)
 102{
 103	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
 104}
 105
 106/*
 107 * Kill the altpath contents of a da-state structure.
 108 */
 109STATIC void
 110xfs_da_state_kill_altpath(xfs_da_state_t *state)
 111{
 112	int	i;
 113
 114	for (i = 0; i < state->altpath.active; i++)
 115		state->altpath.blk[i].bp = NULL;
 116	state->altpath.active = 0;
 117}
 118
 119/*
 120 * Free a da-state structure.
 121 */
 122void
 123xfs_da_state_free(xfs_da_state_t *state)
 124{
 125	xfs_da_state_kill_altpath(state);
 126#ifdef DEBUG
 127	memset((char *)state, 0, sizeof(*state));
 128#endif /* DEBUG */
 129	kmem_zone_free(xfs_da_state_zone, state);
 130}
 131
 132static bool
 133xfs_da3_node_verify(
 134	struct xfs_buf		*bp)
 135{
 136	struct xfs_mount	*mp = bp->b_target->bt_mount;
 137	struct xfs_da_intnode	*hdr = bp->b_addr;
 138	struct xfs_da3_icnode_hdr ichdr;
 139	const struct xfs_dir_ops *ops;
 140
 141	ops = xfs_dir_get_ops(mp, NULL);
 142
 143	ops->node_hdr_from_disk(&ichdr, hdr);
 144
 145	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 146		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 147
 148		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
 149			return false;
 150
 151		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
 152			return false;
 153		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
 154			return false;
 155	} else {
 156		if (ichdr.magic != XFS_DA_NODE_MAGIC)
 157			return false;
 158	}
 159	if (ichdr.level == 0)
 160		return false;
 161	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
 162		return false;
 163	if (ichdr.count == 0)
 164		return false;
 165
 166	/*
 167	 * we don't know if the node is for an attribute or directory tree,
 168	 * so only fail if the count is outside both bounds
 169	 */
 170	if (ichdr.count > mp->m_dir_node_ents &&
 171	    ichdr.count > mp->m_attr_node_ents)
 172		return false;
 173
 174	/* XXX: hash order check? */
 175
 176	return true;
 177}
 178
 179static void
 180xfs_da3_node_write_verify(
 181	struct xfs_buf	*bp)
 182{
 183	struct xfs_mount	*mp = bp->b_target->bt_mount;
 184	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 185	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 186
 187	if (!xfs_da3_node_verify(bp)) {
 188		xfs_buf_ioerror(bp, EFSCORRUPTED);
 189		xfs_verifier_error(bp);
 190		return;
 191	}
 192
 193	if (!xfs_sb_version_hascrc(&mp->m_sb))
 194		return;
 195
 196	if (bip)
 197		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
 198
 199	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
 200}
 201
 202/*
 203 * leaf/node format detection on trees is sketchy, so a node read can be done on
 204 * leaf level blocks when detection identifies the tree as a node format tree
 205 * incorrectly. In this case, we need to swap the verifier to match the correct
 206 * format of the block being read.
 207 */
 208static void
 209xfs_da3_node_read_verify(
 210	struct xfs_buf		*bp)
 211{
 212	struct xfs_da_blkinfo	*info = bp->b_addr;
 213
 214	switch (be16_to_cpu(info->magic)) {
 215		case XFS_DA3_NODE_MAGIC:
 216			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
 217				xfs_buf_ioerror(bp, EFSBADCRC);
 218				break;
 219			}
 220			/* fall through */
 221		case XFS_DA_NODE_MAGIC:
 222			if (!xfs_da3_node_verify(bp)) {
 223				xfs_buf_ioerror(bp, EFSCORRUPTED);
 224				break;
 225			}
 226			return;
 227		case XFS_ATTR_LEAF_MAGIC:
 228		case XFS_ATTR3_LEAF_MAGIC:
 229			bp->b_ops = &xfs_attr3_leaf_buf_ops;
 230			bp->b_ops->verify_read(bp);
 231			return;
 232		case XFS_DIR2_LEAFN_MAGIC:
 233		case XFS_DIR3_LEAFN_MAGIC:
 234			bp->b_ops = &xfs_dir3_leafn_buf_ops;
 235			bp->b_ops->verify_read(bp);
 236			return;
 237		default:
 238			break;
 239	}
 240
 241	/* corrupt block */
 242	xfs_verifier_error(bp);
 243}
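
/*
 * For example, a node-format read that lands on a block stamped
 * XFS_DIR3_LEAFN_MAGIC takes the leafn case above: b_ops is re-pointed at
 * xfs_dir3_leafn_buf_ops and that verifier is rerun (including its own CRC
 * check) instead of the read being failed outright.
 */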
 244
 245const struct xfs_buf_ops xfs_da3_node_buf_ops = {
 246	.verify_read = xfs_da3_node_read_verify,
 247	.verify_write = xfs_da3_node_write_verify,
 248};
 249
 250int
 251xfs_da3_node_read(
 252	struct xfs_trans	*tp,
 253	struct xfs_inode	*dp,
 254	xfs_dablk_t		bno,
 255	xfs_daddr_t		mappedbno,
 256	struct xfs_buf		**bpp,
 257	int			which_fork)
 258{
 259	int			err;
 260
 261	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
 262					which_fork, &xfs_da3_node_buf_ops);
 263	if (!err && tp) {
 264		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
 265		int			type;
 266
 267		switch (be16_to_cpu(info->magic)) {
 268		case XFS_DA_NODE_MAGIC:
 269		case XFS_DA3_NODE_MAGIC:
 270			type = XFS_BLFT_DA_NODE_BUF;
 271			break;
 272		case XFS_ATTR_LEAF_MAGIC:
 273		case XFS_ATTR3_LEAF_MAGIC:
 274			type = XFS_BLFT_ATTR_LEAF_BUF;
 275			break;
 276		case XFS_DIR2_LEAFN_MAGIC:
 277		case XFS_DIR3_LEAFN_MAGIC:
 278			type = XFS_BLFT_DIR_LEAFN_BUF;
 279			break;
 280		default:
 281			type = 0;
 282			ASSERT(0);
 283			break;
 284		}
 285		xfs_trans_buf_set_type(tp, *bpp, type);
 286	}
 287	return err;
 288}
 289
 290/*========================================================================
 291 * Routines used for growing the Btree.
 292 *========================================================================*/
 293
 294/*
 295 * Create the initial contents of an intermediate node.
 296 */
 297int
 298xfs_da3_node_create(
 299	struct xfs_da_args	*args,
 300	xfs_dablk_t		blkno,
 301	int			level,
 302	struct xfs_buf		**bpp,
 303	int			whichfork)
 304{
 305	struct xfs_da_intnode	*node;
 306	struct xfs_trans	*tp = args->trans;
 307	struct xfs_mount	*mp = tp->t_mountp;
 308	struct xfs_da3_icnode_hdr ichdr = {0};
 309	struct xfs_buf		*bp;
 310	int			error;
 311	struct xfs_inode	*dp = args->dp;
 312
 313	trace_xfs_da_node_create(args);
 314	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
 315
 316	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 317	if (error)
 318		return(error);
 319	bp->b_ops = &xfs_da3_node_buf_ops;
 320	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 321	node = bp->b_addr;
 322
 323	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 324		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 325
 326		ichdr.magic = XFS_DA3_NODE_MAGIC;
 327		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
 328		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
 329		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
 330	} else {
 331		ichdr.magic = XFS_DA_NODE_MAGIC;
 332	}
 333	ichdr.level = level;
 334
 335	dp->d_ops->node_hdr_to_disk(node, &ichdr);
 336	xfs_trans_log_buf(tp, bp,
 337		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 338
 339	*bpp = bp;
 340	return(0);
 341}
 342
 343/*
 344 * Split a leaf node, rebalance, then possibly split
 345 * intermediate nodes, rebalance, etc.
 346 */
 347int							/* error */
 348xfs_da3_split(
 349	struct xfs_da_state	*state)
 350{
 351	struct xfs_da_state_blk	*oldblk;
 352	struct xfs_da_state_blk	*newblk;
 353	struct xfs_da_state_blk	*addblk;
 354	struct xfs_da_intnode	*node;
 355	struct xfs_buf		*bp;
 356	int			max;
 357	int			action = 0;
 358	int			error;
 359	int			i;
 360
 361	trace_xfs_da_split(state->args);
 362
 363	/*
 364	 * Walk back up the tree splitting/inserting/adjusting as necessary.
 365	 * If we need to insert and there isn't room, split the node, then
 366	 * decide which fragment to insert the new block from below into.
 367	 * Note that we may split the root this way, but we need more fixup.
 368	 */
 369	max = state->path.active - 1;
 370	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
 371	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
 372	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
 373
 374	addblk = &state->path.blk[max];		/* initial dummy value */
 375	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
 376		oldblk = &state->path.blk[i];
 377		newblk = &state->altpath.blk[i];
 378
 379		/*
 380		 * If a leaf node then
 381		 *     Allocate a new leaf node, then rebalance across them.
 382		 * else if an intermediate node then
 383		 *     We split on the last layer, must we split the node?
 384		 */
 385		switch (oldblk->magic) {
 386		case XFS_ATTR_LEAF_MAGIC:
 387			error = xfs_attr3_leaf_split(state, oldblk, newblk);
 388			if ((error != 0) && (error != ENOSPC)) {
 389				return(error);	/* GROT: attr is inconsistent */
 390			}
 391			if (!error) {
 392				addblk = newblk;
 393				break;
 394			}
 395			/*
 396			 * Entry wouldn't fit, split the leaf again.
 397			 */
 398			state->extravalid = 1;
 399			if (state->inleaf) {
 400				state->extraafter = 0;	/* before newblk */
 401				trace_xfs_attr_leaf_split_before(state->args);
 402				error = xfs_attr3_leaf_split(state, oldblk,
 403							    &state->extrablk);
 404			} else {
 405				state->extraafter = 1;	/* after newblk */
 406				trace_xfs_attr_leaf_split_after(state->args);
 407				error = xfs_attr3_leaf_split(state, newblk,
 408							    &state->extrablk);
 409			}
 410			if (error)
 411				return(error);	/* GROT: attr inconsistent */
 412			addblk = newblk;
 413			break;
 414		case XFS_DIR2_LEAFN_MAGIC:
 415			error = xfs_dir2_leafn_split(state, oldblk, newblk);
 416			if (error)
 417				return error;
 418			addblk = newblk;
 419			break;
 420		case XFS_DA_NODE_MAGIC:
 421			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
 422							 max - i, &action);
 423			addblk->bp = NULL;
 424			if (error)
 425				return(error);	/* GROT: dir is inconsistent */
 426			/*
 427			 * Record the newly split block for the next time thru?
 428			 */
 429			if (action)
 430				addblk = newblk;
 431			else
 432				addblk = NULL;
 433			break;
 434		}
 435
 436		/*
 437		 * Update the btree to show the new hashval for this child.
 438		 */
 439		xfs_da3_fixhashpath(state, &state->path);
 440	}
 441	if (!addblk)
 442		return(0);
 443
 444	/*
 445	 * Split the root node.
 446	 */
 447	ASSERT(state->path.active == 0);
 448	oldblk = &state->path.blk[0];
 449	error = xfs_da3_root_split(state, oldblk, addblk);
 450	if (error) {
 451		addblk->bp = NULL;
 452		return(error);	/* GROT: dir is inconsistent */
 453	}
 454
 455	/*
 456	 * Update pointers to the node which used to be block 0 and
 457	 * just got bumped because of the addition of a new root node.
 458	 * There might be three blocks involved if a double split occurred,
 459	 * and the original block 0 could be at any position in the list.
 460	 *
 461	 * Note: the magic numbers and sibling pointers are in the same
 462	 * physical place for both v2 and v3 headers (by design). Hence it
 463	 * doesn't matter which version of the xfs_da_intnode structure we use
 464	 * here as the result will be the same using either structure.
 465	 */
 466	node = oldblk->bp->b_addr;
 467	if (node->hdr.info.forw) {
 468		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
 469			bp = addblk->bp;
 470		} else {
 471			ASSERT(state->extravalid);
 472			bp = state->extrablk.bp;
 473		}
 474		node = bp->b_addr;
 475		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 476		xfs_trans_log_buf(state->args->trans, bp,
 477		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 478		    sizeof(node->hdr.info)));
 479	}
 480	node = oldblk->bp->b_addr;
 481	if (node->hdr.info.back) {
 482		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
 483			bp = addblk->bp;
 484		} else {
 485			ASSERT(state->extravalid);
 486			bp = state->extrablk.bp;
 487		}
 488		node = bp->b_addr;
 489		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 490		xfs_trans_log_buf(state->args->trans, bp,
 491		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 492		    sizeof(node->hdr.info)));
 493	}
 494	addblk->bp = NULL;
 495	return(0);
 496}
 497
 498/*
 499 * Split the root.  We have to create a new root and point to the two
 500 * parts (the split old root) that we just created.  Copy block zero to
 501 * the EOF, extending the inode in the process.
 502 */
 503STATIC int						/* error */
 504xfs_da3_root_split(
 505	struct xfs_da_state	*state,
 506	struct xfs_da_state_blk	*blk1,
 507	struct xfs_da_state_blk	*blk2)
 508{
 509	struct xfs_da_intnode	*node;
 510	struct xfs_da_intnode	*oldroot;
 511	struct xfs_da_node_entry *btree;
 512	struct xfs_da3_icnode_hdr nodehdr;
 513	struct xfs_da_args	*args;
 514	struct xfs_buf		*bp;
 515	struct xfs_inode	*dp;
 516	struct xfs_trans	*tp;
 517	struct xfs_mount	*mp;
 518	struct xfs_dir2_leaf	*leaf;
 519	xfs_dablk_t		blkno;
 520	int			level;
 521	int			error;
 522	int			size;
 523
 524	trace_xfs_da_root_split(state->args);
 525
 526	/*
 527	 * Copy the existing (incorrect) block from the root node position
 528	 * to a free space somewhere.
 529	 */
 530	args = state->args;
 531	error = xfs_da_grow_inode(args, &blkno);
 532	if (error)
 533		return error;
 534
 535	dp = args->dp;
 536	tp = args->trans;
 537	mp = state->mp;
 538	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
 539	if (error)
 540		return error;
 541	node = bp->b_addr;
 542	oldroot = blk1->bp->b_addr;
 543	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 544	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
 545		struct xfs_da3_icnode_hdr nodehdr;
 546
 547		dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
 548		btree = dp->d_ops->node_tree_p(oldroot);
 549		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
 550		level = nodehdr.level;
 551
 552		/*
 553		 * we are about to copy oldroot to bp, so set up the type
 554		 * of bp while we know exactly what it will be.
 555		 */
 556		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
 557	} else {
 558		struct xfs_dir3_icleaf_hdr leafhdr;
 559		struct xfs_dir2_leaf_entry *ents;
 560
 561		leaf = (xfs_dir2_leaf_t *)oldroot;
 562		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 563		ents = dp->d_ops->leaf_ents_p(leaf);
 564
 565		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
 566		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
 567		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
 568		level = 0;
 569
 570		/*
 571		 * we are about to copy oldroot to bp, so set up the type
 572		 * of bp while we know exactly what it will be.
 573		 */
 574		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
 575	}
 576
 577	/*
 578	 * we can copy most of the information in the node from one block to
 579	 * another, but for CRC enabled headers we have to make sure that the
 580	 * block specific identifiers are kept intact. We update the buffer
 581	 * directly for this.
 582	 */
 583	memcpy(node, oldroot, size);
 584	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
 585	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 586		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
 587
 588		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
 589	}
 590	xfs_trans_log_buf(tp, bp, 0, size - 1);
 591
 592	bp->b_ops = blk1->bp->b_ops;
 593	xfs_trans_buf_copy_type(bp, blk1->bp);
 594	blk1->bp = bp;
 595	blk1->blkno = blkno;
 596
 597	/*
 598	 * Set up the new root node.
 599	 */
 600	error = xfs_da3_node_create(args,
 601		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
 602		level + 1, &bp, args->whichfork);
 603	if (error)
 604		return error;
 605
 606	node = bp->b_addr;
 607	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 608	btree = dp->d_ops->node_tree_p(node);
 609	btree[0].hashval = cpu_to_be32(blk1->hashval);
 610	btree[0].before = cpu_to_be32(blk1->blkno);
 611	btree[1].hashval = cpu_to_be32(blk2->hashval);
 612	btree[1].before = cpu_to_be32(blk2->blkno);
 613	nodehdr.count = 2;
 614	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 615
 616#ifdef DEBUG
 617	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 618	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
 619		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
 620		       blk1->blkno < mp->m_dirfreeblk);
 621		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
 622		       blk2->blkno < mp->m_dirfreeblk);
 623	}
 624#endif
 625
 626	/* Header is already logged by xfs_da_node_create */
 627	xfs_trans_log_buf(tp, bp,
 628		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
 629
 630	return 0;
 631}
 632
 633/*
 634 * Split the node, rebalance, then add the new entry.
 635 */
 636STATIC int						/* error */
 637xfs_da3_node_split(
 638	struct xfs_da_state	*state,
 639	struct xfs_da_state_blk	*oldblk,
 640	struct xfs_da_state_blk	*newblk,
 641	struct xfs_da_state_blk	*addblk,
 642	int			treelevel,
 643	int			*result)
 644{
 645	struct xfs_da_intnode	*node;
 646	struct xfs_da3_icnode_hdr nodehdr;
 647	xfs_dablk_t		blkno;
 648	int			newcount;
 649	int			error;
 650	int			useextra;
 651	struct xfs_inode	*dp = state->args->dp;
 652
 653	trace_xfs_da_node_split(state->args);
 654
 655	node = oldblk->bp->b_addr;
 656	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 657
 658	/*
 659	 * With V2 dirs the extra block is data or freespace.
 660	 */
 661	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
 662	newcount = 1 + useextra;
 663	/*
 664	 * Do we have to split the node?
 665	 */
 666	if (nodehdr.count + newcount > state->node_ents) {
 667		/*
 668		 * Allocate a new node, add to the doubly linked chain of
 669		 * nodes, then move some of our excess entries into it.
 670		 */
 671		error = xfs_da_grow_inode(state->args, &blkno);
 672		if (error)
 673			return(error);	/* GROT: dir is inconsistent */
 674
 675		error = xfs_da3_node_create(state->args, blkno, treelevel,
 676					   &newblk->bp, state->args->whichfork);
 677		if (error)
 678			return(error);	/* GROT: dir is inconsistent */
 679		newblk->blkno = blkno;
 680		newblk->magic = XFS_DA_NODE_MAGIC;
 681		xfs_da3_node_rebalance(state, oldblk, newblk);
 682		error = xfs_da3_blk_link(state, oldblk, newblk);
 683		if (error)
 684			return(error);
 685		*result = 1;
 686	} else {
 687		*result = 0;
 688	}
 689
 690	/*
 691	 * Insert the new entry(s) into the correct block
 692	 * (updating last hashval in the process).
 693	 *
 694	 * xfs_da3_node_add() inserts BEFORE the given index,
 695	 * and as a result of using node_lookup_int() we always
 696	 * point to a valid entry (not after one), but a split
 697	 * operation always results in a new block whose hashvals
 698	 * FOLLOW the current block.
 699	 *
 700	 * If we had double-split op below us, then add the extra block too.
 701	 */
 702	node = oldblk->bp->b_addr;
 703	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 704	if (oldblk->index <= nodehdr.count) {
 705		oldblk->index++;
 706		xfs_da3_node_add(state, oldblk, addblk);
 707		if (useextra) {
 708			if (state->extraafter)
 709				oldblk->index++;
 710			xfs_da3_node_add(state, oldblk, &state->extrablk);
 711			state->extravalid = 0;
 712		}
 713	} else {
 714		newblk->index++;
 715		xfs_da3_node_add(state, newblk, addblk);
 716		if (useextra) {
 717			if (state->extraafter)
 718				newblk->index++;
 719			xfs_da3_node_add(state, newblk, &state->extrablk);
 720			state->extravalid = 0;
 721		}
 722	}
 723
 724	return(0);
 725}
 726
 727/*
 728 * Balance the btree elements between two intermediate nodes,
 729 * usually one full and one empty.
 730 *
 731 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 732 */
 733STATIC void
 734xfs_da3_node_rebalance(
 735	struct xfs_da_state	*state,
 736	struct xfs_da_state_blk	*blk1,
 737	struct xfs_da_state_blk	*blk2)
 738{
 739	struct xfs_da_intnode	*node1;
 740	struct xfs_da_intnode	*node2;
 741	struct xfs_da_intnode	*tmpnode;
 742	struct xfs_da_node_entry *btree1;
 743	struct xfs_da_node_entry *btree2;
 744	struct xfs_da_node_entry *btree_s;
 745	struct xfs_da_node_entry *btree_d;
 746	struct xfs_da3_icnode_hdr nodehdr1;
 747	struct xfs_da3_icnode_hdr nodehdr2;
 748	struct xfs_trans	*tp;
 749	int			count;
 750	int			tmp;
 751	int			swap = 0;
 752	struct xfs_inode	*dp = state->args->dp;
 753
 754	trace_xfs_da_node_rebalance(state->args);
 755
 756	node1 = blk1->bp->b_addr;
 757	node2 = blk2->bp->b_addr;
 758	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 759	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 760	btree1 = dp->d_ops->node_tree_p(node1);
 761	btree2 = dp->d_ops->node_tree_p(node2);
 762
 763	/*
 764	 * Figure out how many entries need to move, and in which direction.
 765	 * Swap the nodes around if that makes it simpler.
 766	 */
 767	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
 768	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
 769	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
 770			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
 771		tmpnode = node1;
 772		node1 = node2;
 773		node2 = tmpnode;
 774		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 775		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 776		btree1 = dp->d_ops->node_tree_p(node1);
 777		btree2 = dp->d_ops->node_tree_p(node2);
 778		swap = 1;
 779	}
 780
 781	count = (nodehdr1.count - nodehdr2.count) / 2;
 782	if (count == 0)
 783		return;
 784	tp = state->args->trans;
 785	/*
 786	 * Two cases: high-to-low and low-to-high.
 787	 */
 788	if (count > 0) {
 789		/*
 790		 * Move elements in node2 up to make a hole.
 791		 */
 792		tmp = nodehdr2.count;
 793		if (tmp > 0) {
 794			tmp *= (uint)sizeof(xfs_da_node_entry_t);
 795			btree_s = &btree2[0];
 796			btree_d = &btree2[count];
 797			memmove(btree_d, btree_s, tmp);
 798		}
 799
 800		/*
 801		 * Move the req'd B-tree elements from high in node1 to
 802		 * low in node2.
 803		 */
 804		nodehdr2.count += count;
 805		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 806		btree_s = &btree1[nodehdr1.count - count];
 807		btree_d = &btree2[0];
 808		memcpy(btree_d, btree_s, tmp);
 809		nodehdr1.count -= count;
 810	} else {
 811		/*
 812		 * Move the req'd B-tree elements from low in node2 to
 813		 * high in node1.
 814		 */
 815		count = -count;
 816		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 817		btree_s = &btree2[0];
 818		btree_d = &btree1[nodehdr1.count];
 819		memcpy(btree_d, btree_s, tmp);
 820		nodehdr1.count += count;
 821
 822		xfs_trans_log_buf(tp, blk1->bp,
 823			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 824
 825		/*
 826		 * Move elements in node2 down to fill the hole.
 827		 */
 828		tmp  = nodehdr2.count - count;
 829		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 830		btree_s = &btree2[count];
 831		btree_d = &btree2[0];
 832		memmove(btree_d, btree_s, tmp);
 833		nodehdr2.count -= count;
 834	}
 835
 836	/*
 837	 * Log header of node 1 and all current bits of node 2.
 838	 */
 839	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
 840	xfs_trans_log_buf(tp, blk1->bp,
 841		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
 842
 843	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
 844	xfs_trans_log_buf(tp, blk2->bp,
 845		XFS_DA_LOGRANGE(node2, &node2->hdr,
 846				dp->d_ops->node_hdr_size +
 847				(sizeof(btree2[0]) * nodehdr2.count)));
 848
 849	/*
 850	 * Record the last hashval from each block for upward propagation.
 851	 * (note: don't use the swapped node pointers)
 852	 */
 853	if (swap) {
 854		node1 = blk1->bp->b_addr;
 855		node2 = blk2->bp->b_addr;
 856		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
 857		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
 858		btree1 = dp->d_ops->node_tree_p(node1);
 859		btree2 = dp->d_ops->node_tree_p(node2);
 860	}
 861	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
 862	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
 863
 864	/*
 865	 * Adjust the expected index for insertion.
 866	 */
 867	if (blk1->index >= nodehdr1.count) {
 868		blk2->index = blk1->index - nodehdr1.count;
 869		blk1->index = nodehdr1.count + 1;	/* make it invalid */
 870	}
 871}
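
/*
 * Worked example of the rebalance above (hypothetical counts): with
 * nodehdr1.count = 6 and an empty node2, count = (6 - 0) / 2 = 3, so
 * entries 3..5 of node1 are copied to the front of node2, leaving three
 * entries in each node.  A negative count moves entries the other way,
 * from the front of node2 to the tail of node1, after which node2 is
 * compacted downwards.
 */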
 872
 873/*
 874 * Add a new entry to an intermediate node.
 875 */
 876STATIC void
 877xfs_da3_node_add(
 878	struct xfs_da_state	*state,
 879	struct xfs_da_state_blk	*oldblk,
 880	struct xfs_da_state_blk	*newblk)
 881{
 882	struct xfs_da_intnode	*node;
 883	struct xfs_da3_icnode_hdr nodehdr;
 884	struct xfs_da_node_entry *btree;
 885	int			tmp;
 886	struct xfs_inode	*dp = state->args->dp;
 887
 888	trace_xfs_da_node_add(state->args);
 889
 890	node = oldblk->bp->b_addr;
 891	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 892	btree = dp->d_ops->node_tree_p(node);
 893
 894	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
 895	ASSERT(newblk->blkno != 0);
 896	if (state->args->whichfork == XFS_DATA_FORK)
 897		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
 898		       newblk->blkno < state->mp->m_dirfreeblk);
 899
 900	/*
 901	 * We may need to make some room before we insert the new node.
 902	 */
 903	tmp = 0;
 904	if (oldblk->index < nodehdr.count) {
 905		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
 906		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
 907	}
 908	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
 909	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
 910	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 911		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
 912				tmp + sizeof(*btree)));
 913
 914	nodehdr.count += 1;
 915	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
 916	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 917		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
 918
 919	/*
 920	 * Copy the last hash value from the oldblk to propagate upwards.
 921	 */
 922	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
 923}
 924
 925/*========================================================================
 926 * Routines used for shrinking the Btree.
 927 *========================================================================*/
 928
 929/*
 930 * Deallocate an empty leaf node, remove it from its parent,
 931 * possibly deallocating that block, etc...
 932 */
 933int
 934xfs_da3_join(
 935	struct xfs_da_state	*state)
 936{
 937	struct xfs_da_state_blk	*drop_blk;
 938	struct xfs_da_state_blk	*save_blk;
 939	int			action = 0;
 940	int			error;
 941
 942	trace_xfs_da_join(state->args);
 943
 944	drop_blk = &state->path.blk[ state->path.active-1 ];
 945	save_blk = &state->altpath.blk[ state->path.active-1 ];
 946	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
 947	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
 948	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
 949
 950	/*
 951	 * Walk back up the tree joining/deallocating as necessary.
 952	 * When we stop dropping blocks, break out.
 953	 */
 954	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
 955		 state->path.active--) {
 956		/*
 957		 * See if we can combine the block with a neighbor.
 958		 *   (action == 0) => no options, just leave
 959		 *   (action == 1) => coalesce, then unlink
 960		 *   (action == 2) => block empty, unlink it
 961		 */
 962		switch (drop_blk->magic) {
 963		case XFS_ATTR_LEAF_MAGIC:
 964			error = xfs_attr3_leaf_toosmall(state, &action);
 965			if (error)
 966				return(error);
 967			if (action == 0)
 968				return(0);
 969			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
 970			break;
 971		case XFS_DIR2_LEAFN_MAGIC:
 972			error = xfs_dir2_leafn_toosmall(state, &action);
 973			if (error)
 974				return error;
 975			if (action == 0)
 976				return 0;
 977			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
 978			break;
 979		case XFS_DA_NODE_MAGIC:
 980			/*
 981			 * Remove the offending node, fixup hashvals,
 982			 * check for a toosmall neighbor.
 983			 */
 984			xfs_da3_node_remove(state, drop_blk);
 985			xfs_da3_fixhashpath(state, &state->path);
 986			error = xfs_da3_node_toosmall(state, &action);
 987			if (error)
 988				return(error);
 989			if (action == 0)
 990				return 0;
 991			xfs_da3_node_unbalance(state, drop_blk, save_blk);
 992			break;
 993		}
 994		xfs_da3_fixhashpath(state, &state->altpath);
 995		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
 996		xfs_da_state_kill_altpath(state);
 997		if (error)
 998			return(error);
 999		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1000							 drop_blk->bp);
1001		drop_blk->bp = NULL;
1002		if (error)
1003			return(error);
1004	}
1005	/*
1006	 * We joined all the way to the top.  If it turns out that
1007	 * we only have one entry in the root, make the child block
1008	 * the new root.
1009	 */
1010	xfs_da3_node_remove(state, drop_blk);
1011	xfs_da3_fixhashpath(state, &state->path);
1012	error = xfs_da3_root_join(state, &state->path.blk[0]);
1013	return(error);
1014}
1015
1016#ifdef	DEBUG
1017static void
1018xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1019{
1020	__be16	magic = blkinfo->magic;
1021
1022	if (level == 1) {
1023		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1024		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1025		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1026		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1027	} else {
1028		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1029		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1030	}
1031	ASSERT(!blkinfo->forw);
1032	ASSERT(!blkinfo->back);
1033}
1034#else	/* !DEBUG */
1035#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1036#endif	/* !DEBUG */
1037
1038/*
1039 * We have only one entry in the root.  Copy the only remaining child of
1040 * the old root to block 0 as the new root node.
1041 */
1042STATIC int
1043xfs_da3_root_join(
1044	struct xfs_da_state	*state,
1045	struct xfs_da_state_blk	*root_blk)
1046{
1047	struct xfs_da_intnode	*oldroot;
1048	struct xfs_da_args	*args;
1049	xfs_dablk_t		child;
1050	struct xfs_buf		*bp;
1051	struct xfs_da3_icnode_hdr oldroothdr;
1052	struct xfs_da_node_entry *btree;
1053	int			error;
1054	struct xfs_inode	*dp = state->args->dp;
1055
1056	trace_xfs_da_root_join(state->args);
1057
1058	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1059
1060	args = state->args;
1061	oldroot = root_blk->bp->b_addr;
1062	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
1063	ASSERT(oldroothdr.forw == 0);
1064	ASSERT(oldroothdr.back == 0);
1065
1066	/*
1067	 * If the root has more than one child, then don't do anything.
1068	 */
1069	if (oldroothdr.count > 1)
1070		return 0;
1071
1072	/*
1073	 * Read in the (only) child block, then copy those bytes into
1074	 * the root block's buffer and free the original child block.
1075	 */
1076	btree = dp->d_ops->node_tree_p(oldroot);
1077	child = be32_to_cpu(btree[0].before);
1078	ASSERT(child != 0);
1079	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1080					     args->whichfork);
1081	if (error)
1082		return error;
1083	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1084
1085	/*
1086	 * This could be copying a leaf back into the root block in the case of
1087	 * there only being a single leaf block left in the tree. Hence we have
1088	 * to update the b_ops pointer as well to match the buffer type change
1089	 * that could occur. For dir3 blocks we also need to update the block
1090	 * number in the buffer header.
1091	 */
1092	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
1093	root_blk->bp->b_ops = bp->b_ops;
1094	xfs_trans_buf_copy_type(root_blk->bp, bp);
1095	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1096		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1097		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1098	}
1099	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
1100	error = xfs_da_shrink_inode(args, child, bp);
1101	return(error);
1102}
1103
1104/*
1105 * Check a node block and its neighbors to see if the block should be
1106 * collapsed into one or the other neighbor.  Always keep the block
1107 * with the smaller block number.
1108 * If the current block is over 50% full, don't try to join it, return 0.
1109 * If the block is empty, fill in the state structure and return 2.
1110 * If it can be collapsed, fill in the state structure and return 1.
1111 * If nothing can be done, return 0.
1112 */
1113STATIC int
1114xfs_da3_node_toosmall(
1115	struct xfs_da_state	*state,
1116	int			*action)
1117{
1118	struct xfs_da_intnode	*node;
1119	struct xfs_da_state_blk	*blk;
1120	struct xfs_da_blkinfo	*info;
1121	xfs_dablk_t		blkno;
1122	struct xfs_buf		*bp;
1123	struct xfs_da3_icnode_hdr nodehdr;
1124	int			count;
1125	int			forward;
1126	int			error;
1127	int			retval;
1128	int			i;
1129	struct xfs_inode	*dp = state->args->dp;
1130
1131	trace_xfs_da_node_toosmall(state->args);
1132
1133	/*
1134	 * Check for the degenerate case of the block being over 50% full.
1135	 * If so, it's not worth even looking to see if we might be able
1136	 * to coalesce with a sibling.
1137	 */
1138	blk = &state->path.blk[ state->path.active-1 ];
1139	info = blk->bp->b_addr;
1140	node = (xfs_da_intnode_t *)info;
1141	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1142	if (nodehdr.count > (state->node_ents >> 1)) {
1143		*action = 0;	/* blk over 50% full, don't try to join */
1144		return 0;
1145	}
1146
1147	/*
1148	 * Check for the degenerate case of the block being empty.
1149	 * If the block is empty, we'll simply delete it, no need to
1150	 * coalesce it with a sibling block.  We choose (arbitrarily)
1151	 * to merge with the forward block unless it is NULL.
1152	 */
1153	if (nodehdr.count == 0) {
1154		/*
1155		 * Make altpath point to the block we want to keep and
1156		 * path point to the block we want to drop (this one).
1157		 */
1158		forward = (info->forw != 0);
1159		memcpy(&state->altpath, &state->path, sizeof(state->path));
1160		error = xfs_da3_path_shift(state, &state->altpath, forward,
1161						 0, &retval);
1162		if (error)
1163			return(error);
1164		if (retval) {
1165			*action = 0;
1166		} else {
1167			*action = 2;
1168		}
1169		return(0);
1170	}
1171
1172	/*
1173	 * Examine each sibling block to see if we can coalesce with
1174	 * at least 25% free space to spare.  We need to figure out
1175	 * whether to merge with the forward or the backward block.
1176	 * We prefer coalescing with the lower numbered sibling so as
1177	 * to shrink a directory over time.
1178	 */
1179	count  = state->node_ents;
1180	count -= state->node_ents >> 2;
1181	count -= nodehdr.count;
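	/*
	 * At this point "count" is the largest sibling entry count that still
	 * leaves roughly 25% of the merged node free.  For example (with
	 * hypothetical sizes): node_ents = 64 and 10 entries here gives
	 * count = 64 - 16 - 10 = 38, so a sibling with 38 or fewer entries
	 * can be absorbed.
	 */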
1182
1183	/* start with smaller blk num */
1184	forward = nodehdr.forw < nodehdr.back;
1185	for (i = 0; i < 2; forward = !forward, i++) {
1186		struct xfs_da3_icnode_hdr thdr;
1187		if (forward)
1188			blkno = nodehdr.forw;
1189		else
1190			blkno = nodehdr.back;
1191		if (blkno == 0)
1192			continue;
1193		error = xfs_da3_node_read(state->args->trans, dp,
1194					blkno, -1, &bp, state->args->whichfork);
1195		if (error)
1196			return(error);
1197
1198		node = bp->b_addr;
1199		dp->d_ops->node_hdr_from_disk(&thdr, node);
1200		xfs_trans_brelse(state->args->trans, bp);
1201
1202		if (count - thdr.count >= 0)
1203			break;	/* fits with at least 25% to spare */
1204	}
1205	if (i >= 2) {
1206		*action = 0;
1207		return 0;
1208	}
1209
1210	/*
1211	 * Make altpath point to the block we want to keep (the lower
1212	 * numbered block) and path point to the block we want to drop.
1213	 */
1214	memcpy(&state->altpath, &state->path, sizeof(state->path));
1215	if (blkno < blk->blkno) {
1216		error = xfs_da3_path_shift(state, &state->altpath, forward,
1217						 0, &retval);
1218	} else {
1219		error = xfs_da3_path_shift(state, &state->path, forward,
1220						 0, &retval);
1221	}
1222	if (error)
1223		return error;
1224	if (retval) {
1225		*action = 0;
1226		return 0;
1227	}
1228	*action = 1;
1229	return 0;
1230}
1231
1232/*
1233 * Pick up the last hashvalue from an intermediate node.
1234 */
1235STATIC uint
1236xfs_da3_node_lasthash(
1237	struct xfs_inode	*dp,
1238	struct xfs_buf		*bp,
1239	int			*count)
1240{
1241	struct xfs_da_intnode	 *node;
1242	struct xfs_da_node_entry *btree;
1243	struct xfs_da3_icnode_hdr nodehdr;
1244
1245	node = bp->b_addr;
1246	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1247	if (count)
1248		*count = nodehdr.count;
1249	if (!nodehdr.count)
1250		return 0;
1251	btree = dp->d_ops->node_tree_p(node);
1252	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1253}
1254
1255/*
1256 * Walk back up the tree adjusting hash values as necessary;
1257 * when we stop making changes, return.
1258 */
1259void
1260xfs_da3_fixhashpath(
1261	struct xfs_da_state	*state,
1262	struct xfs_da_state_path *path)
1263{
1264	struct xfs_da_state_blk	*blk;
1265	struct xfs_da_intnode	*node;
1266	struct xfs_da_node_entry *btree;
1267	xfs_dahash_t		lasthash=0;
1268	int			level;
1269	int			count;
1270	struct xfs_inode	*dp = state->args->dp;
1271
1272	trace_xfs_da_fixhashpath(state->args);
1273
1274	level = path->active-1;
1275	blk = &path->blk[ level ];
1276	switch (blk->magic) {
1277	case XFS_ATTR_LEAF_MAGIC:
1278		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1279		if (count == 0)
1280			return;
1281		break;
1282	case XFS_DIR2_LEAFN_MAGIC:
1283		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
1284		if (count == 0)
1285			return;
1286		break;
1287	case XFS_DA_NODE_MAGIC:
1288		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1289		if (count == 0)
1290			return;
1291		break;
1292	}
1293	for (blk--, level--; level >= 0; blk--, level--) {
1294		struct xfs_da3_icnode_hdr nodehdr;
1295
1296		node = blk->bp->b_addr;
1297		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1298		btree = dp->d_ops->node_tree_p(node);
1299		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1300			break;
1301		blk->hashval = lasthash;
1302		btree[blk->index].hashval = cpu_to_be32(lasthash);
1303		xfs_trans_log_buf(state->args->trans, blk->bp,
1304				  XFS_DA_LOGRANGE(node, &btree[blk->index],
1305						  sizeof(*btree)));
1306
1307		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1308	}
1309}
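/*
 * Example of the fixup above: if removing an entry changes a leaf's last
 * hashval from H1 to H2, the parent entry pointing at that leaf is rewritten
 * to H2, and the walk continues upward until an ancestor's entry already
 * holds the right value, at which point nothing further up can have changed.
 */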
1310
1311/*
1312 * Remove an entry from an intermediate node.
1313 */
1314STATIC void
1315xfs_da3_node_remove(
1316	struct xfs_da_state	*state,
1317	struct xfs_da_state_blk	*drop_blk)
1318{
1319	struct xfs_da_intnode	*node;
1320	struct xfs_da3_icnode_hdr nodehdr;
1321	struct xfs_da_node_entry *btree;
1322	int			index;
1323	int			tmp;
1324	struct xfs_inode	*dp = state->args->dp;
1325
1326	trace_xfs_da_node_remove(state->args);
1327
1328	node = drop_blk->bp->b_addr;
1329	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1330	ASSERT(drop_blk->index < nodehdr.count);
1331	ASSERT(drop_blk->index >= 0);
1332
1333	/*
1334	 * Copy over the offending entry, or just zero it out.
1335	 */
1336	index = drop_blk->index;
1337	btree = dp->d_ops->node_tree_p(node);
1338	if (index < nodehdr.count - 1) {
1339		tmp  = nodehdr.count - index - 1;
1340		tmp *= (uint)sizeof(xfs_da_node_entry_t);
1341		memmove(&btree[index], &btree[index + 1], tmp);
1342		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1343		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
1344		index = nodehdr.count - 1;
1345	}
1346	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1347	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1348	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1349	nodehdr.count -= 1;
1350	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
1351	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1352	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
1353
1354	/*
1355	 * Copy the last hash value from the block to propagate upwards.
1356	 */
1357	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1358}
1359
1360/*
1361 * Unbalance the elements between two intermediate nodes,
1362 * moving all Btree elements from one node into the other.
1363 */
1364STATIC void
1365xfs_da3_node_unbalance(
1366	struct xfs_da_state	*state,
1367	struct xfs_da_state_blk	*drop_blk,
1368	struct xfs_da_state_blk	*save_blk)
1369{
1370	struct xfs_da_intnode	*drop_node;
1371	struct xfs_da_intnode	*save_node;
1372	struct xfs_da_node_entry *drop_btree;
1373	struct xfs_da_node_entry *save_btree;
1374	struct xfs_da3_icnode_hdr drop_hdr;
1375	struct xfs_da3_icnode_hdr save_hdr;
1376	struct xfs_trans	*tp;
1377	int			sindex;
1378	int			tmp;
1379	struct xfs_inode	*dp = state->args->dp;
1380
1381	trace_xfs_da_node_unbalance(state->args);
1382
1383	drop_node = drop_blk->bp->b_addr;
1384	save_node = save_blk->bp->b_addr;
1385	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
1386	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
1387	drop_btree = dp->d_ops->node_tree_p(drop_node);
1388	save_btree = dp->d_ops->node_tree_p(save_node);
1389	tp = state->args->trans;
1390
1391	/*
1392	 * If the dying block has lower hashvals, then move all the
1393	 * elements in the remaining block up to make a hole.
1394	 */
1395	if ((be32_to_cpu(drop_btree[0].hashval) <
1396			be32_to_cpu(save_btree[0].hashval)) ||
1397	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1398			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1399		/* XXX: check this - is memmove dst correct? */
1400		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1401		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1402
1403		sindex = 0;
1404		xfs_trans_log_buf(tp, save_blk->bp,
1405			XFS_DA_LOGRANGE(save_node, &save_btree[0],
1406				(save_hdr.count + drop_hdr.count) *
1407						sizeof(xfs_da_node_entry_t)));
1408	} else {
1409		sindex = save_hdr.count;
1410		xfs_trans_log_buf(tp, save_blk->bp,
1411			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1412				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1413	}
1414
1415	/*
1416	 * Move all the B-tree elements from drop_blk to save_blk.
1417	 */
1418	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1419	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1420	save_hdr.count += drop_hdr.count;
1421
1422	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
1423	xfs_trans_log_buf(tp, save_blk->bp,
1424		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1425				dp->d_ops->node_hdr_size));
1426
1427	/*
1428	 * Save the last hashval in the remaining block for upward propagation.
1429	 */
1430	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1431}
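/*
 * Unbalance layout, roughly: if drop_blk's hashvals sort before save_blk's,
 * save_blk's existing entries are shifted right to open a hole at index 0
 * and drop_blk's entries are copied in front of them; otherwise drop_blk's
 * entries are simply appended after save_blk's existing entries.
 */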
1432
1433/*========================================================================
1434 * Routines used for finding things in the Btree.
1435 *========================================================================*/
1436
1437/*
1438 * Walk down the Btree looking for a particular filename, filling
1439 * in the state structure as we go.
1440 *
1441 * We will set the state structure to point to each of the elements
1442 * in each of the nodes where either the hashval is or should be.
1443 *
1444 * We support duplicate hashvals, so for each entry in the current
1445 * node that could contain the desired hashval, descend.  This is a
1446 * pruned depth-first tree search.
1447 */
1448int							/* error */
1449xfs_da3_node_lookup_int(
1450	struct xfs_da_state	*state,
1451	int			*result)
1452{
1453	struct xfs_da_state_blk	*blk;
1454	struct xfs_da_blkinfo	*curr;
1455	struct xfs_da_intnode	*node;
1456	struct xfs_da_node_entry *btree;
1457	struct xfs_da3_icnode_hdr nodehdr;
1458	struct xfs_da_args	*args;
1459	xfs_dablk_t		blkno;
1460	xfs_dahash_t		hashval;
1461	xfs_dahash_t		btreehashval;
1462	int			probe;
1463	int			span;
1464	int			max;
1465	int			error;
1466	int			retval;
1467	struct xfs_inode	*dp = state->args->dp;
1468
1469	args = state->args;
1470
1471	/*
1472	 * Descend thru the B-tree searching each level for the right
1473	 * node to use, until the right hashval is found.
1474	 */
1475	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1476	for (blk = &state->path.blk[0], state->path.active = 1;
1477			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1478			 blk++, state->path.active++) {
1479		/*
1480		 * Read the next node down in the tree.
1481		 */
1482		blk->blkno = blkno;
1483		error = xfs_da3_node_read(args->trans, args->dp, blkno,
1484					-1, &blk->bp, args->whichfork);
1485		if (error) {
1486			blk->blkno = 0;
1487			state->path.active--;
1488			return(error);
1489		}
1490		curr = blk->bp->b_addr;
1491		blk->magic = be16_to_cpu(curr->magic);
1492
1493		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1494		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1495			blk->magic = XFS_ATTR_LEAF_MAGIC;
1496			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1497			break;
1498		}
1499
1500		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1501		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1502			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1503			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1504							       blk->bp, NULL);
1505			break;
1506		}
1507
1508		blk->magic = XFS_DA_NODE_MAGIC;
1509
1510
1511		/*
1512		 * Search an intermediate node for a match.
1513		 */
1514		node = blk->bp->b_addr;
1515		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1516		btree = dp->d_ops->node_tree_p(node);
1517
1518		max = nodehdr.count;
1519		blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1520
1521		/*
1522		 * Binary search.  (note: small blocks will skip loop)
1523		 */
1524		probe = span = max / 2;
1525		hashval = args->hashval;
1526		while (span > 4) {
1527			span /= 2;
1528			btreehashval = be32_to_cpu(btree[probe].hashval);
1529			if (btreehashval < hashval)
1530				probe += span;
1531			else if (btreehashval > hashval)
1532				probe -= span;
1533			else
1534				break;
1535		}
1536		ASSERT((probe >= 0) && (probe < max));
1537		ASSERT((span <= 4) ||
1538			(be32_to_cpu(btree[probe].hashval) == hashval));
1539
1540		/*
1541		 * Since we may have duplicate hashval's, find the first
1542		 * matching hashval in the node.
1543		 */
1544		while (probe > 0 &&
1545		       be32_to_cpu(btree[probe].hashval) >= hashval) {
1546			probe--;
1547		}
1548		while (probe < max &&
1549		       be32_to_cpu(btree[probe].hashval) < hashval) {
1550			probe++;
1551		}
1552
1553		/*
1554		 * Pick the right block to descend on.
1555		 */
1556		if (probe == max) {
1557			blk->index = max - 1;
1558			blkno = be32_to_cpu(btree[max - 1].before);
1559		} else {
1560			blk->index = probe;
1561			blkno = be32_to_cpu(btree[probe].before);
1562		}
1563	}
1564
1565	/*
1566	 * A leaf block that ends in the hashval that we are interested in
1567	 * (final hashval == search hashval) means that the next block may
1568	 * contain more entries with the same hashval; shift upward to the
1569	 * next leaf and keep searching.
1570	 */
1571	for (;;) {
1572		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1573			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1574							&blk->index, state);
1575		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1576			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1577			blk->index = args->index;
1578			args->blkno = blk->blkno;
1579		} else {
1580			ASSERT(0);
1581			return XFS_ERROR(EFSCORRUPTED);
1582		}
1583		if (((retval == ENOENT) || (retval == ENOATTR)) &&
1584		    (blk->hashval == args->hashval)) {
1585			error = xfs_da3_path_shift(state, &state->path, 1, 1,
1586							 &retval);
1587			if (error)
1588				return(error);
1589			if (retval == 0) {
1590				continue;
1591			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1592				/* path_shift() gives ENOENT */
1593				retval = XFS_ERROR(ENOATTR);
1594			}
1595		}
1596		break;
1597	}
1598	*result = retval;
1599	return(0);
1600}
1601
1602/*========================================================================
1603 * Utility routines.
1604 *========================================================================*/
1605
1606/*
1607 * Compare two intermediate nodes for "order".
1608 */
1609STATIC int
1610xfs_da3_node_order(
1611	struct xfs_inode *dp,
1612	struct xfs_buf	*node1_bp,
1613	struct xfs_buf	*node2_bp)
1614{
1615	struct xfs_da_intnode	*node1;
1616	struct xfs_da_intnode	*node2;
1617	struct xfs_da_node_entry *btree1;
1618	struct xfs_da_node_entry *btree2;
1619	struct xfs_da3_icnode_hdr node1hdr;
1620	struct xfs_da3_icnode_hdr node2hdr;
1621
1622	node1 = node1_bp->b_addr;
1623	node2 = node2_bp->b_addr;
1624	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
1625	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
1626	btree1 = dp->d_ops->node_tree_p(node1);
1627	btree2 = dp->d_ops->node_tree_p(node2);
1628
1629	if (node1hdr.count > 0 && node2hdr.count > 0 &&
1630	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1631	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1632	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1633		return 1;
1634	}
1635	return 0;
1636}
1637
1638/*
1639 * Link a new block into a doubly linked list of blocks (of whatever type).
1640 */
1641int							/* error */
1642xfs_da3_blk_link(
1643	struct xfs_da_state	*state,
1644	struct xfs_da_state_blk	*old_blk,
1645	struct xfs_da_state_blk	*new_blk)
1646{
1647	struct xfs_da_blkinfo	*old_info;
1648	struct xfs_da_blkinfo	*new_info;
1649	struct xfs_da_blkinfo	*tmp_info;
1650	struct xfs_da_args	*args;
1651	struct xfs_buf		*bp;
1652	int			before = 0;
1653	int			error;
1654	struct xfs_inode	*dp = state->args->dp;
1655
1656	/*
1657	 * Set up environment.
1658	 */
1659	args = state->args;
1660	ASSERT(args != NULL);
1661	old_info = old_blk->bp->b_addr;
1662	new_info = new_blk->bp->b_addr;
1663	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1664	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1665	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1666
1667	switch (old_blk->magic) {
1668	case XFS_ATTR_LEAF_MAGIC:
1669		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1670		break;
1671	case XFS_DIR2_LEAFN_MAGIC:
1672		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1673		break;
1674	case XFS_DA_NODE_MAGIC:
1675		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1676		break;
1677	}
1678
1679	/*
1680	 * Link blocks in appropriate order.
1681	 */
1682	if (before) {
1683		/*
1684		 * Link new block in before existing block.
1685		 */
1686		trace_xfs_da_link_before(args);
1687		new_info->forw = cpu_to_be32(old_blk->blkno);
1688		new_info->back = old_info->back;
1689		if (old_info->back) {
1690			error = xfs_da3_node_read(args->trans, dp,
1691						be32_to_cpu(old_info->back),
1692						-1, &bp, args->whichfork);
1693			if (error)
1694				return(error);
1695			ASSERT(bp != NULL);
1696			tmp_info = bp->b_addr;
1697			ASSERT(tmp_info->magic == old_info->magic);
1698			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1699			tmp_info->forw = cpu_to_be32(new_blk->blkno);
1700			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1701		}
1702		old_info->back = cpu_to_be32(new_blk->blkno);
1703	} else {
1704		/*
1705		 * Link new block in after existing block.
1706		 */
1707		trace_xfs_da_link_after(args);
1708		new_info->forw = old_info->forw;
1709		new_info->back = cpu_to_be32(old_blk->blkno);
1710		if (old_info->forw) {
1711			error = xfs_da3_node_read(args->trans, dp,
1712						be32_to_cpu(old_info->forw),
1713						-1, &bp, args->whichfork);
1714			if (error)
1715				return(error);
1716			ASSERT(bp != NULL);
1717			tmp_info = bp->b_addr;
1718			ASSERT(tmp_info->magic == old_info->magic);
1719			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1720			tmp_info->back = cpu_to_be32(new_blk->blkno);
1721			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1722		}
1723		old_info->forw = cpu_to_be32(new_blk->blkno);
1724	}
1725
1726	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1727	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1728	return(0);
1729}
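/*
 * Illustration of the sibling linkage above, with F = the old block's
 * original forward sibling and B = its original back sibling:
 *
 *   link after:   B <-> old <-> F    becomes   B <-> old <-> new <-> F
 *   link before:  B <-> old <-> F    becomes   B <-> new <-> old <-> F
 *
 * In both cases the neighbouring block (F or B), if present, is read and
 * its back/forw pointer is redirected to the new block and logged.
 */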
1730
1731/*
1732 * Unlink a block from a doubly linked list of blocks.
1733 */
1734STATIC int						/* error */
1735xfs_da3_blk_unlink(
1736	struct xfs_da_state	*state,
1737	struct xfs_da_state_blk	*drop_blk,
1738	struct xfs_da_state_blk	*save_blk)
1739{
1740	struct xfs_da_blkinfo	*drop_info;
1741	struct xfs_da_blkinfo	*save_info;
1742	struct xfs_da_blkinfo	*tmp_info;
1743	struct xfs_da_args	*args;
1744	struct xfs_buf		*bp;
1745	int			error;
1746
1747	/*
1748	 * Set up environment.
1749	 */
1750	args = state->args;
1751	ASSERT(args != NULL);
1752	save_info = save_blk->bp->b_addr;
1753	drop_info = drop_blk->bp->b_addr;
1754	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1755	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1756	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1757	ASSERT(save_blk->magic == drop_blk->magic);
1758	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1759	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
1760	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1761	       (be32_to_cpu(drop_info->back) == save_blk->blkno));
1762
1763	/*
1764	 * Unlink the leaf block from the doubly linked chain of leaves.
1765	 */
1766	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1767		trace_xfs_da_unlink_back(args);
1768		save_info->back = drop_info->back;
1769		if (drop_info->back) {
1770			error = xfs_da3_node_read(args->trans, args->dp,
1771						be32_to_cpu(drop_info->back),
1772						-1, &bp, args->whichfork);
1773			if (error)
1774				return(error);
1775			ASSERT(bp != NULL);
1776			tmp_info = bp->b_addr;
1777			ASSERT(tmp_info->magic == save_info->magic);
1778			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1779			tmp_info->forw = cpu_to_be32(save_blk->blkno);
1780			xfs_trans_log_buf(args->trans, bp, 0,
1781						    sizeof(*tmp_info) - 1);
1782		}
1783	} else {
1784		trace_xfs_da_unlink_forward(args);
1785		save_info->forw = drop_info->forw;
1786		if (drop_info->forw) {
1787			error = xfs_da3_node_read(args->trans, args->dp,
1788						be32_to_cpu(drop_info->forw),
1789						-1, &bp, args->whichfork);
1790			if (error)
1791				return(error);
1792			ASSERT(bp != NULL);
1793			tmp_info = bp->b_addr;
1794			ASSERT(tmp_info->magic == save_info->magic);
1795			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1796			tmp_info->back = cpu_to_be32(save_blk->blkno);
1797			xfs_trans_log_buf(args->trans, bp, 0,
1798						    sizeof(*tmp_info) - 1);
1799		}
1800	}
1801
1802	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1803	return(0);
1804}
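/*
 * Unlink illustration: with X = drop_blk's remaining sibling on the far
 * side, X <-> drop <-> save collapses to X <-> save; only save_blk and,
 * if present, X need their sibling pointers rewritten and logged.
 */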
1805
1806/*
1807 * Move a path "forward" or "!forward" one block at the current level.
1808 *
1809 * This routine will adjust a "path" to point to the next block
1810 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1811 * Btree, including updating pointers to the intermediate nodes between
1812 * the new bottom and the root.
1813 */
1814int							/* error */
1815xfs_da3_path_shift(
1816	struct xfs_da_state	*state,
1817	struct xfs_da_state_path *path,
1818	int			forward,
1819	int			release,
1820	int			*result)
1821{
1822	struct xfs_da_state_blk	*blk;
1823	struct xfs_da_blkinfo	*info;
1824	struct xfs_da_intnode	*node;
1825	struct xfs_da_args	*args;
1826	struct xfs_da_node_entry *btree;
1827	struct xfs_da3_icnode_hdr nodehdr;
1828	xfs_dablk_t		blkno = 0;
1829	int			level;
1830	int			error;
1831	struct xfs_inode	*dp = state->args->dp;
1832
1833	trace_xfs_da_path_shift(state->args);
1834
1835	/*
1836	 * Roll up the Btree looking for the first block where our
1837	 * current index is not at the edge of the block.  Note that
1838	 * we skip the bottom layer because we want the sibling block.
1839	 */
1840	args = state->args;
1841	ASSERT(args != NULL);
1842	ASSERT(path != NULL);
1843	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1844	level = (path->active-1) - 1;	/* skip bottom layer in path */
1845	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1846		node = blk->bp->b_addr;
1847		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1848		btree = dp->d_ops->node_tree_p(node);
1849
1850		if (forward && (blk->index < nodehdr.count - 1)) {
1851			blk->index++;
1852			blkno = be32_to_cpu(btree[blk->index].before);
1853			break;
1854		} else if (!forward && (blk->index > 0)) {
1855			blk->index--;
1856			blkno = be32_to_cpu(btree[blk->index].before);
1857			break;
1858		}
1859	}
1860	if (level < 0) {
1861		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
1862		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1863		return(0);
1864	}
1865
1866	/*
1867	 * Roll down the edge of the subtree until we reach the
1868	 * same depth we were at originally.
1869	 */
1870	for (blk++, level++; level < path->active; blk++, level++) {
1871		/*
1872		 * Release the old block.
1873		 * (if it's dirty, trans won't actually let go)
1874		 */
1875		if (release)
1876			xfs_trans_brelse(args->trans, blk->bp);
1877
1878		/*
1879		 * Read the next child block.
1880		 */
1881		blk->blkno = blkno;
1882		error = xfs_da3_node_read(args->trans, dp, blkno, -1,
1883					&blk->bp, args->whichfork);
1884		if (error)
1885			return(error);
1886		info = blk->bp->b_addr;
1887		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1888		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1889		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1890		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1891		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1892		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1893
1894
1895		/*
1896		 * Note: we flatten the magic number to a single type so we
1897		 * don't have to compare against crc/non-crc types elsewhere.
1898		 */
1899		switch (be16_to_cpu(info->magic)) {
1900		case XFS_DA_NODE_MAGIC:
1901		case XFS_DA3_NODE_MAGIC:
1902			blk->magic = XFS_DA_NODE_MAGIC;
1903			node = (xfs_da_intnode_t *)info;
1904			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1905			btree = dp->d_ops->node_tree_p(node);
1906			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1907			if (forward)
1908				blk->index = 0;
1909			else
1910				blk->index = nodehdr.count - 1;
1911			blkno = be32_to_cpu(btree[blk->index].before);
1912			break;
1913		case XFS_ATTR_LEAF_MAGIC:
1914		case XFS_ATTR3_LEAF_MAGIC:
1915			blk->magic = XFS_ATTR_LEAF_MAGIC;
1916			ASSERT(level == path->active-1);
1917			blk->index = 0;
1918			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1919			break;
1920		case XFS_DIR2_LEAFN_MAGIC:
1921		case XFS_DIR3_LEAFN_MAGIC:
1922			blk->magic = XFS_DIR2_LEAFN_MAGIC;
1923			ASSERT(level == path->active-1);
1924			blk->index = 0;
1925			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1926							       blk->bp, NULL);
1927			break;
1928		default:
1929			ASSERT(0);
1930			break;
1931		}
1932	}
1933	*result = 0;
1934	return 0;
1935}
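/*
 * Example of a forward shift: starting at the last entry of a leaf, the walk
 * goes up until it finds a node whose index is not already at the right-hand
 * edge, bumps that index, and then descends the new subtree taking index 0 at
 * every level, ending on the leaf's right-hand sibling.  If every level is
 * already at the edge, the path has run off the tree and ENOENT is reported
 * through *result.
 */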
1936
1937
1938/*========================================================================
1939 * Utility routines.
1940 *========================================================================*/
1941
1942/*
1943 * Implement a simple hash on a character string.
1944 * Rotate the hash value by 7 bits, then XOR each character in.
1945 * This is implemented with some source-level loop unrolling.
1946 */
1947xfs_dahash_t
1948xfs_da_hashname(const __uint8_t *name, int namelen)
1949{
1950	xfs_dahash_t hash;
1951
1952	/*
1953	 * Do four characters at a time as long as we can.
1954	 */
1955	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1956		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1957		       (name[3] << 0) ^ rol32(hash, 7 * 4);
1958
1959	/*
1960	 * Now do the rest of the characters.
1961	 */
1962	switch (namelen) {
1963	case 3:
1964		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1965		       rol32(hash, 7 * 3);
1966	case 2:
1967		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1968	case 1:
1969		return (name[0] << 0) ^ rol32(hash, 7 * 1);
1970	default: /* case 0: */
1971		return hash;
1972	}
1973}
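/*
 * Worked example of the hash: for the single-character name "a" the loop is
 * skipped, hash starts at 0, and the namelen == 1 case returns
 * ('a' << 0) ^ rol32(0, 7) == 0x61.  Longer names fold each character in at
 * a shifted position while rotating the accumulated hash left by 7 bits per
 * character consumed.
 */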
1974
1975enum xfs_dacmp
1976xfs_da_compname(
1977	struct xfs_da_args *args,
1978	const unsigned char *name,
1979	int		len)
1980{
1981	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1982					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1983}
1984
1985static xfs_dahash_t
1986xfs_default_hashname(
1987	struct xfs_name	*name)
1988{
1989	return xfs_da_hashname(name->name, name->len);
1990}
1991
1992const struct xfs_nameops xfs_default_nameops = {
1993	.hashname	= xfs_default_hashname,
1994	.compname	= xfs_da_compname
1995};
1996
1997int
1998xfs_da_grow_inode_int(
1999	struct xfs_da_args	*args,
2000	xfs_fileoff_t		*bno,
2001	int			count)
2002{
2003	struct xfs_trans	*tp = args->trans;
2004	struct xfs_inode	*dp = args->dp;
2005	int			w = args->whichfork;
2006	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
2007	struct xfs_bmbt_irec	map, *mapp;
2008	int			nmap, error, got, i, mapi;
2009
2010	/*
2011	 * Find a spot in the file space to put the new block.
2012	 */
2013	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2014	if (error)
2015		return error;
2016
2017	/*
2018	 * Try mapping it in one filesystem block.
2019	 */
2020	nmap = 1;
2021	ASSERT(args->firstblock != NULL);
2022	error = xfs_bmapi_write(tp, dp, *bno, count,
2023			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2024			args->firstblock, args->total, &map, &nmap,
2025			args->flist);
2026	if (error)
2027		return error;
2028
2029	ASSERT(nmap <= 1);
2030	if (nmap == 1) {
2031		mapp = &map;
2032		mapi = 1;
2033	} else if (nmap == 0 && count > 1) {
2034		xfs_fileoff_t		b;
2035		int			c;
2036
2037		/*
2038		 * If we didn't get it and the block might work if fragmented,
2039		 * try without the CONTIG flag.  Loop until we get it all.
2040		 */
2041		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2042		for (b = *bno, mapi = 0; b < *bno + count; ) {
2043			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2044			c = (int)(*bno + count - b);
2045			error = xfs_bmapi_write(tp, dp, b, c,
2046					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2047					args->firstblock, args->total,
2048					&mapp[mapi], &nmap, args->flist);
2049			if (error)
2050				goto out_free_map;
2051			if (nmap < 1)
2052				break;
2053			mapi += nmap;
2054			b = mapp[mapi - 1].br_startoff +
2055			    mapp[mapi - 1].br_blockcount;
2056		}
2057	} else {
2058		mapi = 0;
2059		mapp = NULL;
2060	}
2061
2062	/*
2063	 * Count the blocks we got, make sure it matches the total.
2064	 */
2065	for (i = 0, got = 0; i < mapi; i++)
2066		got += mapp[i].br_blockcount;
2067	if (got != count || mapp[0].br_startoff != *bno ||
2068	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2069	    *bno + count) {
2070		error = XFS_ERROR(ENOSPC);
2071		goto out_free_map;
2072	}
2073
2074	/* account for newly allocated blocks in reserved blocks total */
2075	args->total -= dp->i_d.di_nblocks - nblks;
2076
2077out_free_map:
2078	if (mapp != &map)
2079		kmem_free(mapp);
2080	return error;
2081}
2082
2083/*
2084 * Add a block to the btree ahead of the file.
2085 * Return the new block number to the caller.
2086 */
2087int
2088xfs_da_grow_inode(
2089	struct xfs_da_args	*args,
2090	xfs_dablk_t		*new_blkno)
2091{
2092	xfs_fileoff_t		bno;
2093	int			count;
2094	int			error;
2095
2096	trace_xfs_da_grow_inode(args);
2097
2098	if (args->whichfork == XFS_DATA_FORK) {
2099		bno = args->dp->i_mount->m_dirleafblk;
2100		count = args->dp->i_mount->m_dirblkfsbs;
2101	} else {
2102		bno = 0;
2103		count = 1;
2104	}
2105
2106	error = xfs_da_grow_inode_int(args, &bno, count);
2107	if (!error)
2108		*new_blkno = (xfs_dablk_t)bno;
2109	return error;
2110}
2111
2112/*
2113 * Ick.  We need to always be able to remove a btree block, even
2114 * if there's no space reservation because the filesystem is full.
2115 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2116 * It swaps the target block with the last block in the file.  The
2117 * last block in the file can always be removed, since removing it
2118 * can't cause a bmap btree split.
2119 */
2120STATIC int
2121xfs_da3_swap_lastblock(
2122	struct xfs_da_args	*args,
2123	xfs_dablk_t		*dead_blknop,
2124	struct xfs_buf		**dead_bufp)
2125{
2126	struct xfs_da_blkinfo	*dead_info;
2127	struct xfs_da_blkinfo	*sib_info;
2128	struct xfs_da_intnode	*par_node;
2129	struct xfs_da_intnode	*dead_node;
2130	struct xfs_dir2_leaf	*dead_leaf2;
2131	struct xfs_da_node_entry *btree;
2132	struct xfs_da3_icnode_hdr par_hdr;
2133	struct xfs_inode	*dp;
2134	struct xfs_trans	*tp;
2135	struct xfs_mount	*mp;
2136	struct xfs_buf		*dead_buf;
2137	struct xfs_buf		*last_buf;
2138	struct xfs_buf		*sib_buf;
2139	struct xfs_buf		*par_buf;
2140	xfs_dahash_t		dead_hash;
2141	xfs_fileoff_t		lastoff;
2142	xfs_dablk_t		dead_blkno;
2143	xfs_dablk_t		last_blkno;
2144	xfs_dablk_t		sib_blkno;
2145	xfs_dablk_t		par_blkno;
2146	int			error;
2147	int			w;
2148	int			entno;
2149	int			level;
2150	int			dead_level;
2151
2152	trace_xfs_da_swap_lastblock(args);
2153
2154	dead_buf = *dead_bufp;
2155	dead_blkno = *dead_blknop;
2156	tp = args->trans;
2157	dp = args->dp;
2158	w = args->whichfork;
2159	ASSERT(w == XFS_DATA_FORK);
2160	mp = dp->i_mount;
2161	lastoff = mp->m_dirfreeblk;
2162	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2163	if (error)
2164		return error;
2165	if (unlikely(lastoff == 0)) {
2166		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2167				 mp);
2168		return XFS_ERROR(EFSCORRUPTED);
2169	}
2170	/*
2171	 * Read the last block in the btree space.
2172	 */
2173	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
2174	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2175	if (error)
2176		return error;
2177	/*
2178	 * Copy the last block into the dead buffer and log it.
2179	 */
2180	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
2181	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
2182	dead_info = dead_buf->b_addr;
2183	/*
2184	 * Get values from the moved block.
2185	 */
2186	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2187	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2188		struct xfs_dir3_icleaf_hdr leafhdr;
2189		struct xfs_dir2_leaf_entry *ents;
2190
2191		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2192		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2193		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2194		dead_level = 0;
2195		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2196	} else {
2197		struct xfs_da3_icnode_hdr deadhdr;
2198
2199		dead_node = (xfs_da_intnode_t *)dead_info;
2200		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2201		btree = dp->d_ops->node_tree_p(dead_node);
2202		dead_level = deadhdr.level;
2203		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2204	}
2205	sib_buf = par_buf = NULL;
2206	/*
2207	 * If the moved block has a left sibling, fix up the pointers.
2208	 */
2209	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2210		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2211		if (error)
2212			goto done;
2213		sib_info = sib_buf->b_addr;
2214		if (unlikely(
2215		    be32_to_cpu(sib_info->forw) != last_blkno ||
2216		    sib_info->magic != dead_info->magic)) {
2217			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2218					 XFS_ERRLEVEL_LOW, mp);
2219			error = XFS_ERROR(EFSCORRUPTED);
2220			goto done;
2221		}
2222		sib_info->forw = cpu_to_be32(dead_blkno);
2223		xfs_trans_log_buf(tp, sib_buf,
2224			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2225					sizeof(sib_info->forw)));
2226		sib_buf = NULL;
2227	}
2228	/*
2229	 * If the moved block has a right sibling, fix up the pointers.
2230	 */
2231	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2232		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2233		if (error)
2234			goto done;
2235		sib_info = sib_buf->b_addr;
2236		if (unlikely(
2237		       be32_to_cpu(sib_info->back) != last_blkno ||
2238		       sib_info->magic != dead_info->magic)) {
2239			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2240					 XFS_ERRLEVEL_LOW, mp);
2241			error = XFS_ERROR(EFSCORRUPTED);
2242			goto done;
2243		}
2244		sib_info->back = cpu_to_be32(dead_blkno);
2245		xfs_trans_log_buf(tp, sib_buf,
2246			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2247					sizeof(sib_info->back)));
2248		sib_buf = NULL;
2249	}
2250	par_blkno = mp->m_dirleafblk;
2251	level = -1;
2252	/*
2253	 * Walk down the tree looking for the parent of the moved block.
2254	 */
2255	for (;;) {
2256		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2257		if (error)
2258			goto done;
2259		par_node = par_buf->b_addr;
2260		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2261		if (level >= 0 && level != par_hdr.level + 1) {
2262			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2263					 XFS_ERRLEVEL_LOW, mp);
2264			error = XFS_ERROR(EFSCORRUPTED);
2265			goto done;
2266		}
2267		level = par_hdr.level;
2268		btree = dp->d_ops->node_tree_p(par_node);
2269		for (entno = 0;
2270		     entno < par_hdr.count &&
2271		     be32_to_cpu(btree[entno].hashval) < dead_hash;
2272		     entno++)
2273			continue;
2274		if (entno == par_hdr.count) {
2275			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2276					 XFS_ERRLEVEL_LOW, mp);
2277			error = XFS_ERROR(EFSCORRUPTED);
2278			goto done;
2279		}
2280		par_blkno = be32_to_cpu(btree[entno].before);
2281		if (level == dead_level + 1)
2282			break;
2283		xfs_trans_brelse(tp, par_buf);
2284		par_buf = NULL;
2285	}
2286	/*
2287	 * We're in the right parent block.
2288	 * Look for the right entry.
2289	 */
2290	for (;;) {
2291		for (;
2292		     entno < par_hdr.count &&
2293		     be32_to_cpu(btree[entno].before) != last_blkno;
2294		     entno++)
2295			continue;
2296		if (entno < par_hdr.count)
2297			break;
2298		par_blkno = par_hdr.forw;
2299		xfs_trans_brelse(tp, par_buf);
2300		par_buf = NULL;
2301		if (unlikely(par_blkno == 0)) {
2302			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2303					 XFS_ERRLEVEL_LOW, mp);
2304			error = XFS_ERROR(EFSCORRUPTED);
2305			goto done;
2306		}
2307		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2308		if (error)
2309			goto done;
2310		par_node = par_buf->b_addr;
2311		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2312		if (par_hdr.level != level) {
2313			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2314					 XFS_ERRLEVEL_LOW, mp);
2315			error = XFS_ERROR(EFSCORRUPTED);
2316			goto done;
2317		}
2318		btree = dp->d_ops->node_tree_p(par_node);
2319		entno = 0;
2320	}
2321	/*
2322	 * Update the parent entry pointing to the moved block.
2323	 */
2324	btree[entno].before = cpu_to_be32(dead_blkno);
2325	xfs_trans_log_buf(tp, par_buf,
2326		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2327				sizeof(btree[entno].before)));
2328	*dead_blknop = last_blkno;
2329	*dead_bufp = last_buf;
2330	return 0;
2331done:
2332	if (par_buf)
2333		xfs_trans_brelse(tp, par_buf);
2334	if (sib_buf)
2335		xfs_trans_brelse(tp, sib_buf);
2336	xfs_trans_brelse(tp, last_buf);
2337	return error;
2338}
2339
2340/*
2341 * Remove a btree block from a directory or attribute.
2342 */
2343int
2344xfs_da_shrink_inode(
2345	xfs_da_args_t	*args,
2346	xfs_dablk_t	dead_blkno,
2347	struct xfs_buf	*dead_buf)
2348{
2349	xfs_inode_t *dp;
2350	int done, error, w, count;
2351	xfs_trans_t *tp;
2352	xfs_mount_t *mp;
2353
2354	trace_xfs_da_shrink_inode(args);
2355
2356	dp = args->dp;
2357	w = args->whichfork;
2358	tp = args->trans;
2359	mp = dp->i_mount;
2360	if (w == XFS_DATA_FORK)
2361		count = mp->m_dirblkfsbs;
2362	else
2363		count = 1;
2364	for (;;) {
2365		/*
2366		 * Remove extents.  If we get ENOSPC for a dir we have to move
2367		 * the last block to the place we want to kill.
2368		 */
2369		error = xfs_bunmapi(tp, dp, dead_blkno, count,
2370				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2371				    0, args->firstblock, args->flist, &done);
2372		if (error == ENOSPC) {
2373			if (w != XFS_DATA_FORK)
2374				break;
2375			error = xfs_da3_swap_lastblock(args, &dead_blkno,
2376						      &dead_buf);
2377			if (error)
2378				break;
2379		} else {
2380			break;
2381		}
2382	}
2383	xfs_trans_binval(tp, dead_buf);
2384	return error;
2385}
2386
2387/*
2388 * See if the mapping(s) for this btree block are valid, i.e.
2389 * don't contain holes, are logically contiguous, and cover the whole range.
2390 */
2391STATIC int
2392xfs_da_map_covers_blocks(
2393	int		nmap,
2394	xfs_bmbt_irec_t	*mapp,
2395	xfs_dablk_t	bno,
2396	int		count)
2397{
2398	int		i;
2399	xfs_fileoff_t	off;
2400
2401	for (i = 0, off = bno; i < nmap; i++) {
2402		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2403		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
2404			return 0;
2405		}
2406		if (off != mapp[i].br_startoff) {
2407			return 0;
2408		}
2409		off += mapp[i].br_blockcount;
2410	}
2411	return off == bno + count;
2412}
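/*
 * For example, a request for bno 0, count 4 covered by two extents [0..1]
 * and [2..3] passes this check, while a delayed or hole mapping, a gap
 * between extents, or mappings that stop short of bno + count all cause a
 * 0 return and are treated as an unusable mapping by the caller.
 */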
2413
2414/*
2415 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2416 *
2417 * For the single map case, it is assumed that the caller has provided a pointer
2418 * to a valid xfs_buf_map.  For the multiple map case, this function will
2419 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2420 * map pointer with the allocated map.
2421 */
2422static int
2423xfs_buf_map_from_irec(
2424	struct xfs_mount	*mp,
2425	struct xfs_buf_map	**mapp,
2426	int			*nmaps,
2427	struct xfs_bmbt_irec	*irecs,
2428	int			nirecs)
2429{
2430	struct xfs_buf_map	*map;
2431	int			i;
2432
2433	ASSERT(*nmaps == 1);
2434	ASSERT(nirecs >= 1);
2435
2436	if (nirecs > 1) {
2437		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2438				  KM_SLEEP | KM_NOFS);
2439		if (!map)
2440			return ENOMEM;
2441		*mapp = map;
2442	}
2443
2444	*nmaps = nirecs;
2445	map = *mapp;
2446	for (i = 0; i < *nmaps; i++) {
2447		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2448		       irecs[i].br_startblock != HOLESTARTBLOCK);
2449		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2450		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2451	}
2452	return 0;
2453}
2454
2455/*
2456 * Map the block we are given so that it is ready for reading. There are
2457 * three possible return values:
2458 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
2459 *	     caller knows not to execute a subsequent read.
2460 *	 0 - if we mapped the block successfully
2461 *	>0 - positive error number if there was an error.
2462 */
2463static int
2464xfs_dabuf_map(
2465	struct xfs_trans	*trans,
2466	struct xfs_inode	*dp,
2467	xfs_dablk_t		bno,
2468	xfs_daddr_t		mappedbno,
2469	int			whichfork,
2470	struct xfs_buf_map	**map,
2471	int			*nmaps)
2472{
2473	struct xfs_mount	*mp = dp->i_mount;
2474	int			nfsb;
2475	int			error = 0;
2476	struct xfs_bmbt_irec	irec;
2477	struct xfs_bmbt_irec	*irecs = &irec;
2478	int			nirecs;
2479
2480	ASSERT(map && *map);
2481	ASSERT(*nmaps == 1);
2482
2483	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
2484
2485	/*
2486	 * Caller doesn't have a mapping.  -2 means don't complain
2487	 * if we land in a hole.
2488	 */
2489	if (mappedbno == -1 || mappedbno == -2) {
2490		/*
2491		 * Optimize the one-block case.
2492		 */
2493		if (nfsb != 1)
2494			irecs = kmem_zalloc(sizeof(irec) * nfsb,
2495					    KM_SLEEP | KM_NOFS);
2496
2497		nirecs = nfsb;
2498		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2499				       &nirecs, xfs_bmapi_aflag(whichfork));
2500		if (error)
2501			goto out;
2502	} else {
2503		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2504		irecs->br_startoff = (xfs_fileoff_t)bno;
2505		irecs->br_blockcount = nfsb;
2506		irecs->br_state = 0;
2507		nirecs = 1;
2508	}
2509
2510	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2511		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
2512		if (unlikely(error == EFSCORRUPTED)) {
2513			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2514				int i;
2515				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2516					__func__, (long long)bno,
2517					(long long)dp->i_ino);
2518				for (i = 0; i < *nmaps; i++) {
2519					xfs_alert(mp,
2520"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2521						i,
2522						(long long)irecs[i].br_startoff,
2523						(long long)irecs[i].br_startblock,
2524						(long long)irecs[i].br_blockcount,
2525						irecs[i].br_state);
2526				}
2527			}
2528			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2529					 XFS_ERRLEVEL_LOW, mp);
2530		}
2531		goto out;
2532	}
2533	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2534out:
2535	if (irecs != &irec)
2536		kmem_free(irecs);
2537	return error;
2538}
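/*
 * Callers select the mapping behaviour through mappedbno: a real disk
 * address skips the bmap lookup entirely, -1 maps bno through the bmap
 * btree and treats a hole as corruption, and -2 maps bno but quietly
 * returns -1 on a hole so callers probing for a block (readahead, lookups
 * that tolerate holes) can back off.
 */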
2539
2540/*
2541 * Get a buffer for the dir/attr block.
2542 */
2543int
2544xfs_da_get_buf(
2545	struct xfs_trans	*trans,
2546	struct xfs_inode	*dp,
2547	xfs_dablk_t		bno,
2548	xfs_daddr_t		mappedbno,
2549	struct xfs_buf		**bpp,
2550	int			whichfork)
2551{
2552	struct xfs_buf		*bp;
2553	struct xfs_buf_map	map;
2554	struct xfs_buf_map	*mapp;
2555	int			nmap;
2556	int			error;
2557
2558	*bpp = NULL;
2559	mapp = &map;
2560	nmap = 1;
2561	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2562				&mapp, &nmap);
2563	if (error) {
2564		/* mapping a hole is not an error, but we don't continue */
2565		if (error == -1)
2566			error = 0;
2567		goto out_free;
2568	}
2569
2570	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2571				    mapp, nmap, 0);
2572	error = bp ? bp->b_error : XFS_ERROR(EIO);
2573	if (error) {
2574		xfs_trans_brelse(trans, bp);
2575		goto out_free;
2576	}
2577
2578	*bpp = bp;
2579
2580out_free:
2581	if (mapp != &map)
2582		kmem_free(mapp);
2583
2584	return error;
2585}
2586
2587/*
2588 * Get a buffer for the dir/attr block, fill in the contents.
2589 */
2590int
2591xfs_da_read_buf(
2592	struct xfs_trans	*trans,
2593	struct xfs_inode	*dp,
2594	xfs_dablk_t		bno,
2595	xfs_daddr_t		mappedbno,
2596	struct xfs_buf		**bpp,
2597	int			whichfork,
2598	const struct xfs_buf_ops *ops)
2599{
2600	struct xfs_buf		*bp;
2601	struct xfs_buf_map	map;
2602	struct xfs_buf_map	*mapp;
2603	int			nmap;
2604	int			error;
2605
2606	*bpp = NULL;
2607	mapp = &map;
2608	nmap = 1;
2609	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2610				&mapp, &nmap);
2611	if (error) {
2612		/* mapping a hole is not an error, but we don't continue */
2613		if (error == -1)
2614			error = 0;
2615		goto out_free;
2616	}
2617
2618	error = xfs_trans_read_buf_map(dp->i_mount, trans,
2619					dp->i_mount->m_ddev_targp,
2620					mapp, nmap, 0, &bp, ops);
2621	if (error)
2622		goto out_free;
2623
2624	if (whichfork == XFS_ATTR_FORK)
2625		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2626	else
2627		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2628
2629	/*
2630	 * This verification code will be moved to a CRC verification callback
2631	 * function so just leave it here unchanged until then.
2632	 */
2633	{
2634		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
2635		xfs_dir2_free_t		*free = bp->b_addr;
2636		xfs_da_blkinfo_t	*info = bp->b_addr;
2637		uint			magic, magic1;
2638		struct xfs_mount	*mp = dp->i_mount;
2639
2640		magic = be16_to_cpu(info->magic);
2641		magic1 = be32_to_cpu(hdr->magic);
2642		if (unlikely(
2643		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2644				   (magic != XFS_DA3_NODE_MAGIC) &&
2645				   (magic != XFS_ATTR_LEAF_MAGIC) &&
2646				   (magic != XFS_ATTR3_LEAF_MAGIC) &&
2647				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
2648				   (magic != XFS_DIR3_LEAF1_MAGIC) &&
2649				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
2650				   (magic != XFS_DIR3_LEAFN_MAGIC) &&
2651				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2652				   (magic1 != XFS_DIR3_BLOCK_MAGIC) &&
2653				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
2654				   (magic1 != XFS_DIR3_DATA_MAGIC) &&
2655				   (free->hdr.magic !=
2656					cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
2657				   (free->hdr.magic !=
2658					cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
2659				mp, XFS_ERRTAG_DA_READ_BUF,
2660				XFS_RANDOM_DA_READ_BUF))) {
2661			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
2662			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2663					     XFS_ERRLEVEL_LOW, mp, info);
2664			error = XFS_ERROR(EFSCORRUPTED);
2665			xfs_trans_brelse(trans, bp);
2666			goto out_free;
2667		}
2668	}
2669	*bpp = bp;
2670out_free:
2671	if (mapp != &map)
2672		kmem_free(mapp);
2673
2674	return error;
2675}
2676
2677/*
2678 * Readahead the dir/attr block.
2679 */
2680xfs_daddr_t
2681xfs_da_reada_buf(
2682	struct xfs_trans	*trans,
2683	struct xfs_inode	*dp,
2684	xfs_dablk_t		bno,
2685	xfs_daddr_t		mappedbno,
2686	int			whichfork,
2687	const struct xfs_buf_ops *ops)
2688{
2689	struct xfs_buf_map	map;
2690	struct xfs_buf_map	*mapp;
2691	int			nmap;
2692	int			error;
2693
2694	mapp = &map;
2695	nmap = 1;
2696	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2697				&mapp, &nmap);
2698	if (error) {
2699		/* mapping a hole is not an error, but we don't continue */
2700		if (error == -1)
2701			error = 0;
2702		goto out_free;
2703	}
2704
2705	mappedbno = mapp[0].bm_bn;
2706	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2707
2708out_free:
2709	if (mapp != &map)
2710		kmem_free(mapp);
2711
2712	if (error)
2713		return -1;
2714	return mappedbno;
2715}