   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * dir.c
   5 *
   6 * Creates, reads, walks and deletes directory-nodes
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 *  Portions of this code from linux/fs/ext3/dir.c
  11 *
  12 *  Copyright (C) 1992, 1993, 1994, 1995
  13 *  Remy Card (card@masi.ibp.fr)
  14 *  Laboratoire MASI - Institut Blaise pascal
  15 *  Universite Pierre et Marie Curie (Paris VI)
  16 *
  17 *   from
  18 *
  19 *   linux/fs/minix/dir.c
  20 *
  21 *   Copyright (C) 1991, 1992 Linus Torvalds
  22 *
  23 * This program is free software; you can redistribute it and/or
  24 * modify it under the terms of the GNU General Public
  25 * License as published by the Free Software Foundation; either
  26 * version 2 of the License, or (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  31 * General Public License for more details.
  32 *
  33 * You should have received a copy of the GNU General Public
  34 * License along with this program; if not, write to the
  35 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  36 * Boston, MA 02111-1307, USA.
  37 */
  38
  39#include <linux/fs.h>
  40#include <linux/types.h>
  41#include <linux/slab.h>
  42#include <linux/highmem.h>
  43#include <linux/quotaops.h>
  44#include <linux/sort.h>
  45
  46#include <cluster/masklog.h>
  47
  48#include "ocfs2.h"
  49
  50#include "alloc.h"
  51#include "blockcheck.h"
  52#include "dir.h"
  53#include "dlmglue.h"
  54#include "extent_map.h"
  55#include "file.h"
  56#include "inode.h"
  57#include "journal.h"
  58#include "namei.h"
  59#include "suballoc.h"
  60#include "super.h"
  61#include "sysfile.h"
  62#include "uptodate.h"
  63#include "ocfs2_trace.h"
  64
  65#include "buffer_head_io.h"
  66
  67#define NAMEI_RA_CHUNKS  2
  68#define NAMEI_RA_BLOCKS  4
  69#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  70
  71static unsigned char ocfs2_filetype_table[] = {
  72	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
  73};
  74
  75static int ocfs2_do_extend_dir(struct super_block *sb,
  76			       handle_t *handle,
  77			       struct inode *dir,
  78			       struct buffer_head *parent_fe_bh,
  79			       struct ocfs2_alloc_context *data_ac,
  80			       struct ocfs2_alloc_context *meta_ac,
  81			       struct buffer_head **new_bh);
  82static int ocfs2_dir_indexed(struct inode *inode);
  83
  84/*
  85 * These are distinct checks because future versions of the file system will
  86 * want to have a trailing dirent structure independent of indexing.
  87 */
  88static int ocfs2_supports_dir_trailer(struct inode *dir)
  89{
  90	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  91
  92	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  93		return 0;
  94
  95	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  96}
  97
  98/*
  99 * "new" here refers to the point at which we're creating a new
 100 * directory via "mkdir()", but also when we're expanding an inline
 101 * directory. In either case, we don't yet have the indexing bit set
 102 * on the directory, so the standard checks will fail when metaecc
 103 * is turned off. Only directory-initialization type functions should
 104 * use this; everything else wants ocfs2_supports_dir_trailer().
 105 */
 106static int ocfs2_new_dir_wants_trailer(struct inode *dir)
 107{
 108	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 109
 110	return ocfs2_meta_ecc(osb) ||
 111		ocfs2_supports_indexed_dirs(osb);
 112}
 113
 114static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
 115{
 116	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
 117}
 118
 119#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 120
 121/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 122 * them more consistent? */
 123struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 124							    void *data)
 125{
 126	char *p = data;
 127
 128	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 129	return (struct ocfs2_dir_block_trailer *)p;
 130}
 131
 132/*
 133 * XXX: This is executed once on every dirent. We should consider optimizing
 134 * it.
 135 */
 136static int ocfs2_skip_dir_trailer(struct inode *dir,
 137				  struct ocfs2_dir_entry *de,
 138				  unsigned long offset,
 139				  unsigned long blklen)
 140{
 141	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 142
 143	if (!ocfs2_supports_dir_trailer(dir))
 144		return 0;
 145
 146	if (offset != toff)
 147		return 0;
 148
 149	return 1;
 150}
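/*
 * Illustrative note (not from the upstream file): the check above works
 * because the trailer always occupies the last
 * sizeof(struct ocfs2_dir_block_trailer) bytes of the block, i.e. it
 * starts at ocfs2_dir_trailer_blk_off(sb). As the db_compat_rec_len
 * setup in ocfs2_init_dir_trailer() below suggests, the trailer's
 * leading fields are laid out like an empty dirent, so a walker that
 * lands on it sees a plausible record; this helper is what tells such
 * walkers to treat it as the end of the block. Worked example with
 * illustrative sizes: for a 4096-byte block and a 64-byte trailer,
 * dirents occupy offsets 0..4031 and a record starting at 4032 is the
 * trailer.
 */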
 151
 152static void ocfs2_init_dir_trailer(struct inode *inode,
 153				   struct buffer_head *bh, u16 rec_len)
 154{
 155	struct ocfs2_dir_block_trailer *trailer;
 156
 157	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 158	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 159	trailer->db_compat_rec_len =
 160			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 161	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 162	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 163	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 164}
 165/*
 166 * Link an unindexed block with a dir trailer structure into the index free
 167 * list. This function will modify dirdata_bh, but assumes you've already
 168 * passed it to the journal.
 169 */
 170static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 171				     struct buffer_head *dx_root_bh,
 172				     struct buffer_head *dirdata_bh)
 173{
 174	int ret;
 175	struct ocfs2_dx_root_block *dx_root;
 176	struct ocfs2_dir_block_trailer *trailer;
 177
 178	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 179				      OCFS2_JOURNAL_ACCESS_WRITE);
 180	if (ret) {
 181		mlog_errno(ret);
 182		goto out;
 183	}
 184	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 185	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 186
 187	trailer->db_free_next = dx_root->dr_free_blk;
 188	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 189
 190	ocfs2_journal_dirty(handle, dx_root_bh);
 191
 192out:
 193	return ret;
 194}
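/*
 * Illustrative note (not from the upstream file): the two assignments
 * above are a plain singly-linked-list push onto the head of the index
 * free list. With illustrative block numbers: if dr_free_blk currently
 * points at block 200 and we link block 300, afterwards block 300's
 * trailer has db_free_next == 200 and dr_free_blk == 300.
 */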
 195
 196static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 197{
 198	return res->dl_prev_leaf_bh == NULL;
 199}
 200
 201void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 202{
 203	brelse(res->dl_dx_root_bh);
 204	brelse(res->dl_leaf_bh);
 205	brelse(res->dl_dx_leaf_bh);
 206	brelse(res->dl_prev_leaf_bh);
 207}
 208
 209static int ocfs2_dir_indexed(struct inode *inode)
 210{
 211	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 212		return 1;
 213	return 0;
 214}
 215
 216static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 217{
 218	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 219}
 220
 221/*
 222 * Hashing code adapted from ext3
 223 */
 224#define DELTA 0x9E3779B9
 225
 226static void TEA_transform(__u32 buf[4], __u32 const in[])
 227{
 228	__u32	sum = 0;
 229	__u32	b0 = buf[0], b1 = buf[1];
 230	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 231	int	n = 16;
 232
 233	do {
 234		sum += DELTA;
 235		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 236		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 237	} while (--n);
 238
 239	buf[0] += b0;
 240	buf[1] += b1;
 241}
 242
 243static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 244{
 245	__u32	pad, val;
 246	int	i;
 247
 248	pad = (__u32)len | ((__u32)len << 8);
 249	pad |= pad << 16;
 250
 251	val = pad;
 252	if (len > num*4)
 253		len = num * 4;
 254	for (i = 0; i < len; i++) {
 255		if ((i % 4) == 0)
 256			val = pad;
 257		val = msg[i] + (val << 8);
 258		if ((i % 4) == 3) {
 259			*buf++ = val;
 260			val = pad;
 261			num--;
 262		}
 263	}
 264	if (--num >= 0)
 265		*buf++ = val;
 266	while (--num >= 0)
 267		*buf++ = pad;
 268}
 269
 270static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 271				   struct ocfs2_dx_hinfo *hinfo)
 272{
 273	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 274	const char	*p;
 275	__u32		in[8], buf[4];
 276
 277	/*
 278	 * XXX: Is this really necessary, if the index is never looked
 279	 * at by readdir? Is a hash value of '0' a bad idea?
 280	 */
 281	if ((len == 1 && !strncmp(".", name, 1)) ||
 282	    (len == 2 && !strncmp("..", name, 2))) {
 283		buf[0] = buf[1] = 0;
 284		goto out;
 285	}
 286
 287#ifdef OCFS2_DEBUG_DX_DIRS
 288	/*
 289	 * This makes it very easy to debug indexing problems. We
 290	 * should never allow this to be selected without hand editing
 291	 * this file though.
 292	 */
 293	buf[0] = buf[1] = len;
 294	goto out;
 295#endif
 296
 297	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 298
 299	p = name;
 300	while (len > 0) {
 301		str2hashbuf(p, len, in, 4);
 302		TEA_transform(buf, in);
 303		len -= 16;
 304		p += 16;
 305	}
 306
 307out:
 308	hinfo->major_hash = buf[0];
 309	hinfo->minor_hash = buf[1];
 310}
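/*
 * Illustrative sketch (not from the upstream file): a minimal
 * restatement of one step of the hashing loop above, using the same
 * static helpers. str2hashbuf() packs up to 16 bytes of the name
 * (padded with the length byte) into four 32-bit words, and
 * TEA_transform() folds them into the running 128-bit state seeded
 * from osb_dx_seed; major_hash and minor_hash are simply the first two
 * words of that state. The function name below is ours, not ocfs2's.
 */
static inline void ocfs2_example_hash_chunk(__u32 state[4],
					    const char *name, int len)
{
	__u32 in[8];

	str2hashbuf(name, len, in, 4);	/* pack/pad one <=16 byte chunk */
	TEA_transform(state, in);	/* mix it into the hash state */
}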
 311
 312/*
 313 * bh passed here can be an inode block or a dir data block, depending
 314 * on the inode inline data flag.
 315 */
 316static int ocfs2_check_dir_entry(struct inode * dir,
 317				 struct ocfs2_dir_entry * de,
 318				 struct buffer_head * bh,
 319				 unsigned long offset)
 320{
 321	const char *error_msg = NULL;
 322	const int rlen = le16_to_cpu(de->rec_len);
 323
 324	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 325		error_msg = "rec_len is smaller than minimal";
 326	else if (unlikely(rlen % 4 != 0))
 327		error_msg = "rec_len % 4 != 0";
 328	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 329		error_msg = "rec_len is too small for name_len";
 330	else if (unlikely(
 331		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
 332		error_msg = "directory entry across blocks";
 333
 334	if (unlikely(error_msg != NULL))
 335		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 336		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 337		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 338		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 339		     de->name_len);
 340
 341	return error_msg == NULL ? 1 : 0;
 342}
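/*
 * Illustrative sketch (not from the upstream file): the checks above
 * boil down to four rules - rec_len must be large enough for a
 * one-character entry, 4-byte aligned, large enough for this entry's
 * own name, and must not run past the end of the block. A hypothetical
 * helper expressing the same predicate:
 */
static inline int ocfs2_example_rec_len_ok(struct ocfs2_dir_entry *de,
					   unsigned long offset,
					   unsigned long blocksize)
{
	unsigned int rlen = le16_to_cpu(de->rec_len);

	return rlen >= OCFS2_DIR_REC_LEN(1) &&
	       (rlen % 4) == 0 &&
	       rlen >= OCFS2_DIR_REC_LEN(de->name_len) &&
	       offset + rlen <= blocksize;
}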
 343
 344static inline int ocfs2_match(int len,
 345			      const char * const name,
 346			      struct ocfs2_dir_entry *de)
 347{
 348	if (len != de->name_len)
 349		return 0;
 350	if (!de->inode)
 351		return 0;
 352	return !memcmp(name, de->name, len);
 353}
 354
 355/*
 356 * Returns 0 if not found, -1 on failure, and 1 on success
 357 */
 358static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 359					struct inode *dir,
 360					const char *name, int namelen,
 361					unsigned long offset,
 362					char *first_de,
 363					unsigned int bytes,
 364					struct ocfs2_dir_entry **res_dir)
 365{
 366	struct ocfs2_dir_entry *de;
 367	char *dlimit, *de_buf;
 368	int de_len;
 369	int ret = 0;
 370
 371	de_buf = first_de;
 372	dlimit = de_buf + bytes;
 373
 374	while (de_buf < dlimit) {
 375		/* this code is executed quadratically often */
 376		/* do minimal checking `by hand' */
 377
 378		de = (struct ocfs2_dir_entry *) de_buf;
 379
 380		if (de_buf + namelen <= dlimit &&
 381		    ocfs2_match(namelen, name, de)) {
 382			/* found a match - just to be sure, do a full check */
 383			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 384				ret = -1;
 385				goto bail;
 386			}
 387			*res_dir = de;
 388			ret = 1;
 389			goto bail;
 390		}
 391
 392		/* prevent looping on a bad block */
 393		de_len = le16_to_cpu(de->rec_len);
 394		if (de_len <= 0) {
 395			ret = -1;
 396			goto bail;
 397		}
 398
 399		de_buf += de_len;
 400		offset += de_len;
 401	}
 402
 403bail:
 404	trace_ocfs2_search_dirblock(ret);
 405	return ret;
 406}
 407
 408static struct buffer_head *ocfs2_find_entry_id(const char *name,
 409					       int namelen,
 410					       struct inode *dir,
 411					       struct ocfs2_dir_entry **res_dir)
 412{
 413	int ret, found;
 414	struct buffer_head *di_bh = NULL;
 415	struct ocfs2_dinode *di;
 416	struct ocfs2_inline_data *data;
 417
 418	ret = ocfs2_read_inode_block(dir, &di_bh);
 419	if (ret) {
 420		mlog_errno(ret);
 421		goto out;
 422	}
 423
 424	di = (struct ocfs2_dinode *)di_bh->b_data;
 425	data = &di->id2.i_data;
 426
 427	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 428				      data->id_data, i_size_read(dir), res_dir);
 429	if (found == 1)
 430		return di_bh;
 431
 432	brelse(di_bh);
 433out:
 434	return NULL;
 435}
 436
 437static int ocfs2_validate_dir_block(struct super_block *sb,
 438				    struct buffer_head *bh)
 439{
 440	int rc;
 441	struct ocfs2_dir_block_trailer *trailer =
 442		ocfs2_trailer_from_bh(bh, sb);
 443
 444
 445	/*
 446	 * We don't validate dirents here, that's handled
 447	 * in-place when the code walks them.
 448	 */
 449	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 450
 451	BUG_ON(!buffer_uptodate(bh));
 452
 453	/*
 454	 * If the ecc fails, we return the error but otherwise
 455	 * leave the filesystem running.  We know any error is
 456	 * local to this block.
 457	 *
 458	 * Note that we are safe to call this even if the directory
 459	 * doesn't have a trailer.  Filesystems without metaecc will do
 460	 * nothing, and filesystems with it will have one.
 461	 */
 462	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 463	if (rc)
 464		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
 465		     (unsigned long long)bh->b_blocknr);
 466
 467	return rc;
 468}
 469
 470/*
 471 * Validate a directory trailer.
 472 *
 473 * We check the trailer here rather than in ocfs2_validate_dir_block()
 474 * because that function doesn't have the inode to test.
 475 */
 476static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 477{
 478	int rc = 0;
 479	struct ocfs2_dir_block_trailer *trailer;
 480
 481	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 482	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 483		rc = ocfs2_error(dir->i_sb,
 484				 "Invalid dirblock #%llu: signature = %.*s\n",
 485				 (unsigned long long)bh->b_blocknr, 7,
 486				 trailer->db_signature);
 487		goto out;
 488	}
 489	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 490		rc = ocfs2_error(dir->i_sb,
 491				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 492				 (unsigned long long)bh->b_blocknr,
 493				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 494		goto out;
 495	}
 496	if (le64_to_cpu(trailer->db_parent_dinode) !=
 497	    OCFS2_I(dir)->ip_blkno) {
 498		rc = ocfs2_error(dir->i_sb,
 499				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 500				 (unsigned long long)bh->b_blocknr,
 501				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
 502				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 503		goto out;
 504	}
 505out:
 506	return rc;
 507}
 508
 509/*
 510 * This function forces all errors to -EIO for consistency with its
 511 * predecessor, ocfs2_bread().  We haven't audited what returning the
 512 * real error codes would do to callers.  We log the real codes with
 513 * mlog_errno() before we squash them.
 514 */
 515static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 516				struct buffer_head **bh, int flags)
 517{
 518	int rc = 0;
 519	struct buffer_head *tmp = *bh;
 520
 521	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 522				    ocfs2_validate_dir_block);
 523	if (rc) {
 524		mlog_errno(rc);
 525		goto out;
 526	}
 527
 528	if (!(flags & OCFS2_BH_READAHEAD) &&
 529	    ocfs2_supports_dir_trailer(inode)) {
 530		rc = ocfs2_check_dir_trailer(inode, tmp);
 531		if (rc) {
 532			if (!*bh)
 533				brelse(tmp);
 534			mlog_errno(rc);
 535			goto out;
 536		}
 537	}
 538
 539	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 540	if (!*bh)
 541		*bh = tmp;
 542
 543out:
 544	return rc ? -EIO : 0;
 545}
 546
 547/*
 548 * Read the block at 'phys' which belongs to this directory
 549 * inode. This function does no virtual->physical block translation -
 550 * what's passed in is assumed to be a valid directory block.
 551 */
 552static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 553				       struct buffer_head **bh)
 554{
 555	int ret;
 556	struct buffer_head *tmp = *bh;
 557
 558	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 559			       ocfs2_validate_dir_block);
 560	if (ret) {
 561		mlog_errno(ret);
 562		goto out;
 563	}
 564
 565	if (ocfs2_supports_dir_trailer(dir)) {
 566		ret = ocfs2_check_dir_trailer(dir, tmp);
 567		if (ret) {
 568			if (!*bh)
 569				brelse(tmp);
 570			mlog_errno(ret);
 571			goto out;
 572		}
 573	}
 574
 575	if (!ret && !*bh)
 576		*bh = tmp;
 577out:
 578	return ret;
 579}
 580
 581static int ocfs2_validate_dx_root(struct super_block *sb,
 582				  struct buffer_head *bh)
 583{
 584	int ret;
 585	struct ocfs2_dx_root_block *dx_root;
 586
 587	BUG_ON(!buffer_uptodate(bh));
 588
 589	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 590
 591	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 592	if (ret) {
 593		mlog(ML_ERROR,
 594		     "Checksum failed for dir index root block %llu\n",
 595		     (unsigned long long)bh->b_blocknr);
 596		return ret;
 597	}
 598
 599	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 600		ret = ocfs2_error(sb,
 601				  "Dir Index Root # %llu has bad signature %.*s\n",
 602				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 603				  7, dx_root->dr_signature);
 604	}
 605
 606	return ret;
 607}
 608
 609static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 610			      struct buffer_head **dx_root_bh)
 611{
 612	int ret;
 613	u64 blkno = le64_to_cpu(di->i_dx_root);
 614	struct buffer_head *tmp = *dx_root_bh;
 615
 616	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 617			       ocfs2_validate_dx_root);
 618
 619	/* If ocfs2_read_block() got us a new bh, pass it up. */
 620	if (!ret && !*dx_root_bh)
 621		*dx_root_bh = tmp;
 622
 623	return ret;
 624}
 625
 626static int ocfs2_validate_dx_leaf(struct super_block *sb,
 627				  struct buffer_head *bh)
 628{
 629	int ret;
 630	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 631
 632	BUG_ON(!buffer_uptodate(bh));
 633
 634	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 635	if (ret) {
 636		mlog(ML_ERROR,
 637		     "Checksum failed for dir index leaf block %llu\n",
 638		     (unsigned long long)bh->b_blocknr);
 639		return ret;
 640	}
 641
 642	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 643		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 644				  7, dx_leaf->dl_signature);
 645	}
 646
 647	return ret;
 648}
 649
 650static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 651			      struct buffer_head **dx_leaf_bh)
 652{
 653	int ret;
 654	struct buffer_head *tmp = *dx_leaf_bh;
 655
 656	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 657			       ocfs2_validate_dx_leaf);
 658
 659	/* If ocfs2_read_block() got us a new bh, pass it up. */
 660	if (!ret && !*dx_leaf_bh)
 661		*dx_leaf_bh = tmp;
 662
 663	return ret;
 664}
 665
 666/*
 667 * Read a series of dx_leaf blocks. This expects all buffer_head
 668 * pointers to be NULL on function entry.
 669 */
 670static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 671				struct buffer_head **dx_leaf_bhs)
 672{
 673	int ret;
 674
 675	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 676				ocfs2_validate_dx_leaf);
 677	if (ret)
 678		mlog_errno(ret);
 679
 680	return ret;
 681}
 682
 683static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 684					       struct inode *dir,
 685					       struct ocfs2_dir_entry **res_dir)
 686{
 687	struct super_block *sb;
 688	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 689	struct buffer_head *bh, *ret = NULL;
 690	unsigned long start, block, b;
 691	int ra_max = 0;		/* Number of bh's in the readahead
 692				   buffer, bh_use[] */
 693	int ra_ptr = 0;		/* Current index into readahead
 694				   buffer */
 695	int num = 0;
 696	int nblocks, i, err;
 697
 698	sb = dir->i_sb;
 699
 700	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 701	start = OCFS2_I(dir)->ip_dir_start_lookup;
 702	if (start >= nblocks)
 703		start = 0;
 704	block = start;
 705
 706restart:
 707	do {
 708		/*
 709		 * We deal with the read-ahead logic here.
 710		 */
 711		if (ra_ptr >= ra_max) {
 712			/* Refill the readahead buffer */
 713			ra_ptr = 0;
 714			b = block;
 715			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 716				/*
 717				 * Terminate if we reach the end of the
 718				 * directory and must wrap, or if our
 719				 * search has finished at this block.
 720				 */
 721				if (b >= nblocks || (num && block == start)) {
 722					bh_use[ra_max] = NULL;
 723					break;
 724				}
 725				num++;
 726
 727				bh = NULL;
 728				err = ocfs2_read_dir_block(dir, b++, &bh,
 729							   OCFS2_BH_READAHEAD);
 730				bh_use[ra_max] = bh;
 731			}
 732		}
 733		if ((bh = bh_use[ra_ptr++]) == NULL)
 734			goto next;
 735		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 736			/* read error, skip block & hope for the best.
 737			 * ocfs2_read_dir_block() has released the bh. */
 738			mlog(ML_ERROR, "reading directory %llu, "
 739				    "offset %lu\n",
 740				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 741				    block);
 742			goto next;
 743		}
 744		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 745					  block << sb->s_blocksize_bits,
 746					  bh->b_data, sb->s_blocksize,
 747					  res_dir);
 748		if (i == 1) {
 749			OCFS2_I(dir)->ip_dir_start_lookup = block;
 750			ret = bh;
 751			goto cleanup_and_exit;
 752		} else {
 753			brelse(bh);
 754			if (i < 0)
 755				goto cleanup_and_exit;
 756		}
 757	next:
 758		if (++block >= nblocks)
 759			block = 0;
 760	} while (block != start);
 761
 762	/*
 763	 * If the directory has grown while we were searching, then
 764	 * search the last part of the directory before giving up.
 765	 */
 766	block = nblocks;
 767	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 768	if (block < nblocks) {
 769		start = 0;
 770		goto restart;
 771	}
 772
 773cleanup_and_exit:
 774	/* Clean up the read-ahead blocks */
 775	for (; ra_ptr < ra_max; ra_ptr++)
 776		brelse(bh_use[ra_ptr]);
 777
 778	trace_ocfs2_find_entry_el(ret);
 779	return ret;
 780}
 781
 782static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 783				   struct ocfs2_extent_list *el,
 784				   u32 major_hash,
 785				   u32 *ret_cpos,
 786				   u64 *ret_phys_blkno,
 787				   unsigned int *ret_clen)
 788{
 789	int ret = 0, i, found;
 790	struct buffer_head *eb_bh = NULL;
 791	struct ocfs2_extent_block *eb;
 792	struct ocfs2_extent_rec *rec = NULL;
 793
 794	if (el->l_tree_depth) {
 795		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 796				      &eb_bh);
 797		if (ret) {
 798			mlog_errno(ret);
 799			goto out;
 800		}
 801
 802		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 803		el = &eb->h_list;
 804
 805		if (el->l_tree_depth) {
 806			ret = ocfs2_error(inode->i_sb,
 807					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 808					  inode->i_ino,
 809					  (unsigned long long)eb_bh->b_blocknr);
 810			goto out;
 811		}
 812	}
 813
 814	found = 0;
 815	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 816		rec = &el->l_recs[i];
 817
 818		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 819			found = 1;
 820			break;
 821		}
 822	}
 823
 824	if (!found) {
 825		ret = ocfs2_error(inode->i_sb,
 826				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 827				  inode->i_ino,
 828				  le32_to_cpu(rec->e_cpos),
 829				  ocfs2_rec_clusters(el, rec));
 830		goto out;
 831	}
 832
 833	if (ret_phys_blkno)
 834		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 835	if (ret_cpos)
 836		*ret_cpos = le32_to_cpu(rec->e_cpos);
 837	if (ret_clen)
 838		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 839
 840out:
 841	brelse(eb_bh);
 842	return ret;
 843}
 844
 845/*
 846 * Returns the block index, from the start of the cluster to which
 847 * this hash belongs.
 848 */
 849static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 850						   u32 minor_hash)
 851{
 852	return minor_hash & osb->osb_dx_mask;
 853}
 854
 855static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 856					  struct ocfs2_dx_hinfo *hinfo)
 857{
 858	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 859}
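/*
 * Illustrative note (not from the upstream file): osb_dx_mask is
 * expected to be (blocks per cluster - 1), so the minor hash simply
 * selects a block inside the cluster chosen via the major hash. With
 * illustrative geometry of 4K blocks and 128K clusters (32 blocks per
 * cluster), the mask is 31 and a minor hash of 0x12345 picks block
 * 0x12345 & 31 == 5 of that cluster.
 */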
 860
 861static int ocfs2_dx_dir_lookup(struct inode *inode,
 862			       struct ocfs2_extent_list *el,
 863			       struct ocfs2_dx_hinfo *hinfo,
 864			       u32 *ret_cpos,
 865			       u64 *ret_phys_blkno)
 866{
 867	int ret = 0;
 868	unsigned int cend, uninitialized_var(clen);
 869	u32 uninitialized_var(cpos);
 870	u64 uninitialized_var(blkno);
 871	u32 name_hash = hinfo->major_hash;
 872
 873	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 874				      &clen);
 875	if (ret) {
 876		mlog_errno(ret);
 877		goto out;
 878	}
 879
 880	cend = cpos + clen;
 881	if (name_hash >= cend) {
 882		/* We want the last cluster */
 883		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 884		cpos += clen - 1;
 885	} else {
 886		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 887						  name_hash - cpos);
 888		cpos = name_hash;
 889	}
 890
 891	/*
 892	 * We now have the cluster which should hold our entry. To
 893	 * find the exact block from the start of the cluster to
 894	 * search, we take the lower bits of the hash.
 895	 */
 896	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 897
 898	if (ret_phys_blkno)
 899		*ret_phys_blkno = blkno;
 900	if (ret_cpos)
 901		*ret_cpos = cpos;
 902
 903out:
 904
 905	return ret;
 906}
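/*
 * Illustrative note (not from the upstream file): worked example with
 * made-up numbers. Suppose ocfs2_dx_dir_lookup_rec() returns a record
 * with cpos = 64 and clen = 8, and name_hash is 67. Since 67 < 72, we
 * advance blkno by ocfs2_clusters_to_blocks(sb, 67 - 64), i.e. to the
 * fourth cluster of the extent, and ocfs2_dx_dir_hash_idx() then picks
 * the block within that cluster from the minor hash. Had name_hash
 * been 72 or larger, the last cluster of the extent would be used.
 */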
 907
 908static int ocfs2_dx_dir_search(const char *name, int namelen,
 909			       struct inode *dir,
 910			       struct ocfs2_dx_root_block *dx_root,
 911			       struct ocfs2_dir_lookup_result *res)
 912{
 913	int ret, i, found;
 914	u64 uninitialized_var(phys);
 915	struct buffer_head *dx_leaf_bh = NULL;
 916	struct ocfs2_dx_leaf *dx_leaf;
 917	struct ocfs2_dx_entry *dx_entry = NULL;
 918	struct buffer_head *dir_ent_bh = NULL;
 919	struct ocfs2_dir_entry *dir_ent = NULL;
 920	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 921	struct ocfs2_extent_list *dr_el;
 922	struct ocfs2_dx_entry_list *entry_list;
 923
 924	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 925
 926	if (ocfs2_dx_root_inline(dx_root)) {
 927		entry_list = &dx_root->dr_entries;
 928		goto search;
 929	}
 930
 931	dr_el = &dx_root->dr_list;
 932
 933	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 934	if (ret) {
 935		mlog_errno(ret);
 936		goto out;
 937	}
 938
 939	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 940				  namelen, name, hinfo->major_hash,
 941				  hinfo->minor_hash, (unsigned long long)phys);
 942
 943	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 944	if (ret) {
 945		mlog_errno(ret);
 946		goto out;
 947	}
 948
 949	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 950
 951	trace_ocfs2_dx_dir_search_leaf_info(
 952			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 953			le16_to_cpu(dx_leaf->dl_list.de_count));
 954
 955	entry_list = &dx_leaf->dl_list;
 956
 957search:
 958	/*
 959	 * Empty leaf is legal, so no need to check for that.
 960	 */
 961	found = 0;
 962	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 963		dx_entry = &entry_list->de_entries[i];
 964
 965		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 966		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 967			continue;
 968
 969		/*
 970		 * Search unindexed leaf block now. We're not
 971		 * guaranteed to find anything.
 972		 */
 973		ret = ocfs2_read_dir_block_direct(dir,
 974					  le64_to_cpu(dx_entry->dx_dirent_blk),
 975					  &dir_ent_bh);
 976		if (ret) {
 977			mlog_errno(ret);
 978			goto out;
 979		}
 980
 981		/*
 982		 * XXX: We should check the unindexed block here,
 983		 * before using it.
 984		 */
 985
 986		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 987					      0, dir_ent_bh->b_data,
 988					      dir->i_sb->s_blocksize, &dir_ent);
 989		if (found == 1)
 990			break;
 991
 992		if (found == -1) {
 993			/* This means we found a bad directory entry. */
 994			ret = -EIO;
 995			mlog_errno(ret);
 996			goto out;
 997		}
 998
 999		brelse(dir_ent_bh);
1000		dir_ent_bh = NULL;
1001	}
1002
1003	if (found <= 0) {
1004		ret = -ENOENT;
1005		goto out;
1006	}
1007
1008	res->dl_leaf_bh = dir_ent_bh;
1009	res->dl_entry = dir_ent;
1010	res->dl_dx_leaf_bh = dx_leaf_bh;
1011	res->dl_dx_entry = dx_entry;
1012
1013	ret = 0;
1014out:
1015	if (ret) {
1016		brelse(dx_leaf_bh);
1017		brelse(dir_ent_bh);
1018	}
1019	return ret;
1020}
1021
1022static int ocfs2_find_entry_dx(const char *name, int namelen,
1023			       struct inode *dir,
1024			       struct ocfs2_dir_lookup_result *lookup)
1025{
1026	int ret;
1027	struct buffer_head *di_bh = NULL;
1028	struct ocfs2_dinode *di;
1029	struct buffer_head *dx_root_bh = NULL;
1030	struct ocfs2_dx_root_block *dx_root;
1031
1032	ret = ocfs2_read_inode_block(dir, &di_bh);
1033	if (ret) {
1034		mlog_errno(ret);
1035		goto out;
1036	}
1037
1038	di = (struct ocfs2_dinode *)di_bh->b_data;
1039
1040	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1041	if (ret) {
1042		mlog_errno(ret);
1043		goto out;
1044	}
1045	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1046
1047	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1048	if (ret) {
1049		if (ret != -ENOENT)
1050			mlog_errno(ret);
1051		goto out;
1052	}
1053
1054	lookup->dl_dx_root_bh = dx_root_bh;
1055	dx_root_bh = NULL;
1056out:
1057	brelse(di_bh);
1058	brelse(dx_root_bh);
1059	return ret;
1060}
1061
1062/*
1063 * Try to find an entry of the provided name within 'dir'.
1064 *
1065 * If nothing was found, -ENOENT is returned. Otherwise, zero is
1066 * returned and the struct 'res' will contain information useful to
1067 * other directory manipulation functions.
1068 *
1069 * Caller can NOT assume anything about the contents of the
1070 * buffer_heads - they are passed back only so that they can be passed
1071 * into any one of the manipulation functions (add entry, delete
1072 * entry, etc). As an example, bh in the extent directory case is a
1073 * data block; in the inline-data case it actually points to an inode;
1074 * in the indexed directory case, multiple buffers are involved.
1075 */
1076int ocfs2_find_entry(const char *name, int namelen,
1077		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1078{
1079	struct buffer_head *bh;
1080	struct ocfs2_dir_entry *res_dir = NULL;
1081
1082	if (ocfs2_dir_indexed(dir))
1083		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1084
1085	/*
1086	 * The unindexed dir code only uses part of the lookup
1087	 * structure, so there's no reason to push it down further
1088	 * than this.
1089	 */
1090	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1091		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1092	else
1093		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
1094
1095	if (bh == NULL)
1096		return -ENOENT;
1097
1098	lookup->dl_leaf_bh = bh;
1099	lookup->dl_entry = res_dir;
1100	return 0;
1101}
1102
1103/*
1104 * Update inode number and type of a previously found directory entry.
1105 */
1106int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1107		       struct ocfs2_dir_lookup_result *res,
1108		       struct inode *new_entry_inode)
1109{
1110	int ret;
1111	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1112	struct ocfs2_dir_entry *de = res->dl_entry;
1113	struct buffer_head *de_bh = res->dl_leaf_bh;
1114
1115	/*
1116	 * The same code works fine for both inline-data and extent
1117	 * based directories, so no need to split this up.  The only
1118	 * difference is the journal_access function.
1119	 */
1120
1121	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1122		access = ocfs2_journal_access_di;
1123
1124	ret = access(handle, INODE_CACHE(dir), de_bh,
1125		     OCFS2_JOURNAL_ACCESS_WRITE);
1126	if (ret) {
1127		mlog_errno(ret);
1128		goto out;
1129	}
1130
1131	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1132	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1133
1134	ocfs2_journal_dirty(handle, de_bh);
1135
1136out:
1137	return ret;
1138}
1139
1140/*
1141 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1142 * previous entry
1143 */
1144static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1145				struct ocfs2_dir_entry *de_del,
1146				struct buffer_head *bh, char *first_de,
1147				unsigned int bytes)
1148{
1149	struct ocfs2_dir_entry *de, *pde;
1150	int i, status = -ENOENT;
1151	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1152
1153	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1154		access = ocfs2_journal_access_di;
1155
1156	i = 0;
1157	pde = NULL;
1158	de = (struct ocfs2_dir_entry *) first_de;
1159	while (i < bytes) {
1160		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
1161			status = -EIO;
1162			mlog_errno(status);
1163			goto bail;
1164		}
1165		if (de == de_del)  {
1166			status = access(handle, INODE_CACHE(dir), bh,
1167					OCFS2_JOURNAL_ACCESS_WRITE);
1168			if (status < 0) {
1169				status = -EIO;
1170				mlog_errno(status);
1171				goto bail;
1172			}
1173			if (pde)
1174				le16_add_cpu(&pde->rec_len,
1175						le16_to_cpu(de->rec_len));
1176			de->inode = 0;
1177			dir->i_version++;
1178			ocfs2_journal_dirty(handle, bh);
1179			goto bail;
1180		}
1181		i += le16_to_cpu(de->rec_len);
1182		pde = de;
1183		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1184	}
1185bail:
1186	return status;
1187}
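/*
 * Illustrative note (not from the upstream file): worked example of
 * the merge above, with made-up sizes. If a block holds entries
 * A (rec_len 16), B (rec_len 20) and C, and B is deleted, A's rec_len
 * becomes 16 + 20 = 36 and B's inode field is cleared. Walks now step
 * straight from A to C, and the 20 reclaimed bytes count as slack at
 * the end of A for ocfs2_figure_dirent_hole() and
 * ocfs2_dirent_would_fit() below.
 */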
1188
1189static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1190{
1191	unsigned int hole;
1192
1193	if (le64_to_cpu(de->inode) == 0)
1194		hole = le16_to_cpu(de->rec_len);
1195	else
1196		hole = le16_to_cpu(de->rec_len) -
1197			OCFS2_DIR_REC_LEN(de->name_len);
1198
1199	return hole;
1200}
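/*
 * Illustrative note (not from the upstream file): worked example with
 * made-up sizes. A live entry with a 5-character name and rec_len 40
 * only needs OCFS2_DIR_REC_LEN(5) == 20 bytes (12-byte fixed header
 * plus the name, rounded up to 4 bytes), so it contributes a 20-byte
 * hole; a deleted entry (inode == 0) contributes its whole rec_len.
 */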
1201
1202static int ocfs2_find_max_rec_len(struct super_block *sb,
1203				  struct buffer_head *dirblock_bh)
1204{
1205	int size, this_hole, largest_hole = 0;
1206	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1207	struct ocfs2_dir_entry *de;
1208
1209	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1210	size = ocfs2_dir_trailer_blk_off(sb);
1211	limit = start + size;
1212	de_buf = start;
1213	de = (struct ocfs2_dir_entry *)de_buf;
1214	do {
1215		if (de_buf != trailer) {
1216			this_hole = ocfs2_figure_dirent_hole(de);
1217			if (this_hole > largest_hole)
1218				largest_hole = this_hole;
1219		}
1220
1221		de_buf += le16_to_cpu(de->rec_len);
1222		de = (struct ocfs2_dir_entry *)de_buf;
1223	} while (de_buf < limit);
1224
1225	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1226		return largest_hole;
1227	return 0;
1228}
1229
1230static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1231				       int index)
1232{
1233	int num_used = le16_to_cpu(entry_list->de_num_used);
1234
1235	if (num_used == 1 || index == (num_used - 1))
1236		goto clear;
1237
1238	memmove(&entry_list->de_entries[index],
1239		&entry_list->de_entries[index + 1],
1240		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1241clear:
1242	num_used--;
1243	memset(&entry_list->de_entries[num_used], 0,
1244	       sizeof(struct ocfs2_dx_entry));
1245	entry_list->de_num_used = cpu_to_le16(num_used);
1246}
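/*
 * Illustrative note (not from the upstream file): worked example of
 * the removal above. With de_num_used == 4 and entries [e0, e1, e2,
 * e3], removing index 1 memmove()s e2 and e3 down one slot, zeroes the
 * now-stale slot 3 and drops de_num_used to 3. Removing the last index
 * (or the only entry) skips the memmove and just clears that slot.
 */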
1247
1248static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1249				 struct ocfs2_dir_lookup_result *lookup)
1250{
1251	int ret, index, max_rec_len, add_to_free_list = 0;
1252	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1253	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1254	struct ocfs2_dx_leaf *dx_leaf;
1255	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1256	struct ocfs2_dir_block_trailer *trailer;
1257	struct ocfs2_dx_root_block *dx_root;
1258	struct ocfs2_dx_entry_list *entry_list;
1259
1260	/*
1261	 * This function gets a bit messy because we might have to
1262	 * modify the root block, regardless of whether the indexed
1263	 * entries are stored inline.
1264	 */
1265
1266	/*
1267	 * *Only* set 'entry_list' here, based on where we're looking
1268	 * for the indexed entries. Later, we might still want to
1269	 * journal both blocks, based on free list state.
1270	 */
1271	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1272	if (ocfs2_dx_root_inline(dx_root)) {
1273		entry_list = &dx_root->dr_entries;
1274	} else {
1275		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1276		entry_list = &dx_leaf->dl_list;
1277	}
1278
1279	/* Neither of these is a disk corruption - that should have
1280	 * been caught by lookup, before we got here. */
1281	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1282	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1283
1284	index = (char *)dx_entry - (char *)entry_list->de_entries;
1285	index /= sizeof(*dx_entry);
1286
1287	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1288		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1289		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1290		     entry_list, dx_entry);
1291		return -EIO;
1292	}
1293
1294	/*
1295	 * We know that removal of this dirent will leave enough room
1296	 * for a new one, so add this block to the free list if it
1297	 * isn't already there.
1298	 */
1299	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1300	if (trailer->db_free_rec_len == 0)
1301		add_to_free_list = 1;
1302
1303	/*
1304	 * Add the block holding our index into the journal before
1305	 * removing the unindexed entry. If we get an error return
1306	 * from __ocfs2_delete_entry(), then it hasn't removed the
1307	 * entry yet. Likewise, successful return means we *must*
1308	 * remove the indexed entry.
1309	 *
1310	 * We're also careful to journal the root tree block here as
1311	 * the entry count needs to be updated. Also, we might be
1312	 * adding to the start of the free list.
1313	 */
1314	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1315				      OCFS2_JOURNAL_ACCESS_WRITE);
1316	if (ret) {
1317		mlog_errno(ret);
1318		goto out;
1319	}
1320
1321	if (!ocfs2_dx_root_inline(dx_root)) {
1322		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1323					      lookup->dl_dx_leaf_bh,
1324					      OCFS2_JOURNAL_ACCESS_WRITE);
1325		if (ret) {
1326			mlog_errno(ret);
1327			goto out;
1328		}
1329	}
1330
1331	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1332				    index);
1333
1334	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1335				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1336	if (ret) {
1337		mlog_errno(ret);
1338		goto out;
1339	}
1340
1341	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1342	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1343	if (add_to_free_list) {
1344		trailer->db_free_next = dx_root->dr_free_blk;
1345		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1346		ocfs2_journal_dirty(handle, dx_root_bh);
1347	}
1348
1349	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1350	ocfs2_journal_dirty(handle, leaf_bh);
1351
1352	le32_add_cpu(&dx_root->dr_num_entries, -1);
1353	ocfs2_journal_dirty(handle, dx_root_bh);
1354
1355	ocfs2_dx_list_remove_entry(entry_list, index);
1356
1357	if (!ocfs2_dx_root_inline(dx_root))
1358		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1359
1360out:
1361	return ret;
1362}
1363
1364static inline int ocfs2_delete_entry_id(handle_t *handle,
1365					struct inode *dir,
1366					struct ocfs2_dir_entry *de_del,
1367					struct buffer_head *bh)
1368{
1369	int ret;
1370	struct buffer_head *di_bh = NULL;
1371	struct ocfs2_dinode *di;
1372	struct ocfs2_inline_data *data;
1373
1374	ret = ocfs2_read_inode_block(dir, &di_bh);
1375	if (ret) {
1376		mlog_errno(ret);
1377		goto out;
1378	}
1379
1380	di = (struct ocfs2_dinode *)di_bh->b_data;
1381	data = &di->id2.i_data;
1382
1383	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1384				   i_size_read(dir));
1385
1386	brelse(di_bh);
1387out:
1388	return ret;
1389}
1390
1391static inline int ocfs2_delete_entry_el(handle_t *handle,
1392					struct inode *dir,
1393					struct ocfs2_dir_entry *de_del,
1394					struct buffer_head *bh)
1395{
1396	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1397				    bh->b_size);
1398}
1399
1400/*
1401 * Delete a directory entry. Hide the details of directory
1402 * implementation from the caller.
1403 */
1404int ocfs2_delete_entry(handle_t *handle,
1405		       struct inode *dir,
1406		       struct ocfs2_dir_lookup_result *res)
1407{
1408	if (ocfs2_dir_indexed(dir))
1409		return ocfs2_delete_entry_dx(handle, dir, res);
1410
1411	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1412		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1413					     res->dl_leaf_bh);
1414
1415	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1416				     res->dl_leaf_bh);
1417}
1418
1419/*
1420 * Check whether 'de' has enough room to hold an entry of
1421 * 'new_rec_len' bytes.
1422 */
1423static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1424					 unsigned int new_rec_len)
1425{
1426	unsigned int de_really_used;
1427
1428	/* Check whether this is an empty record with enough space */
1429	if (le64_to_cpu(de->inode) == 0 &&
1430	    le16_to_cpu(de->rec_len) >= new_rec_len)
1431		return 1;
1432
1433	/*
1434	 * Record might have free space at the end which we can
1435	 * use.
1436	 */
1437	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1438	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1439	    return 1;
1440
1441	return 0;
1442}
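/*
 * Illustrative note (not from the upstream file): with made-up sizes,
 * an in-use entry whose rec_len is 64 but whose name only needs
 * OCFS2_DIR_REC_LEN(de->name_len) == 20 bytes has a 44-byte tail, so
 * any new entry with rec_len <= 44 fits behind it; a deleted entry
 * (inode == 0) is reusable whenever its whole rec_len is big enough.
 */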
1443
1444static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1445					  struct ocfs2_dx_entry *dx_new_entry)
1446{
1447	int i;
1448
1449	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1450	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1451
1452	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1453}
1454
1455static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1456				       struct ocfs2_dx_hinfo *hinfo,
1457				       u64 dirent_blk)
1458{
1459	int i;
1460	struct ocfs2_dx_entry *dx_entry;
1461
1462	i = le16_to_cpu(entry_list->de_num_used);
1463	dx_entry = &entry_list->de_entries[i];
1464
1465	memset(dx_entry, 0, sizeof(*dx_entry));
1466	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1467	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1468	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1469
1470	le16_add_cpu(&entry_list->de_num_used, 1);
1471}
1472
1473static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1474				      struct ocfs2_dx_hinfo *hinfo,
1475				      u64 dirent_blk,
1476				      struct buffer_head *dx_leaf_bh)
1477{
1478	int ret;
1479	struct ocfs2_dx_leaf *dx_leaf;
1480
1481	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1482				      OCFS2_JOURNAL_ACCESS_WRITE);
1483	if (ret) {
1484		mlog_errno(ret);
1485		goto out;
1486	}
1487
1488	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1489	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1490	ocfs2_journal_dirty(handle, dx_leaf_bh);
1491
1492out:
1493	return ret;
1494}
1495
1496static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1497					struct ocfs2_dx_hinfo *hinfo,
1498					u64 dirent_blk,
1499					struct ocfs2_dx_root_block *dx_root)
1500{
1501	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1502}
1503
1504static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1505			       struct ocfs2_dir_lookup_result *lookup)
1506{
1507	int ret = 0;
1508	struct ocfs2_dx_root_block *dx_root;
1509	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1510
1511	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1512				      OCFS2_JOURNAL_ACCESS_WRITE);
1513	if (ret) {
1514		mlog_errno(ret);
1515		goto out;
1516	}
1517
1518	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1519	if (ocfs2_dx_root_inline(dx_root)) {
1520		ocfs2_dx_inline_root_insert(dir, handle,
1521					    &lookup->dl_hinfo,
1522					    lookup->dl_leaf_bh->b_blocknr,
1523					    dx_root);
1524	} else {
1525		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1526						 lookup->dl_leaf_bh->b_blocknr,
1527						 lookup->dl_dx_leaf_bh);
1528		if (ret)
1529			goto out;
1530	}
1531
1532	le32_add_cpu(&dx_root->dr_num_entries, 1);
1533	ocfs2_journal_dirty(handle, dx_root_bh);
1534
1535out:
1536	return ret;
1537}
1538
1539static void ocfs2_remove_block_from_free_list(struct inode *dir,
1540				       handle_t *handle,
1541				       struct ocfs2_dir_lookup_result *lookup)
1542{
1543	struct ocfs2_dir_block_trailer *trailer, *prev;
1544	struct ocfs2_dx_root_block *dx_root;
1545	struct buffer_head *bh;
1546
1547	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1548
1549	if (ocfs2_free_list_at_root(lookup)) {
1550		bh = lookup->dl_dx_root_bh;
1551		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1552		dx_root->dr_free_blk = trailer->db_free_next;
1553	} else {
1554		bh = lookup->dl_prev_leaf_bh;
1555		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1556		prev->db_free_next = trailer->db_free_next;
1557	}
1558
1559	trailer->db_free_rec_len = cpu_to_le16(0);
1560	trailer->db_free_next = cpu_to_le64(0);
1561
1562	ocfs2_journal_dirty(handle, bh);
1563	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1564}
1565
1566/*
1567 * This expects that a journal write has been reserved on
1568 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1569 */
1570static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1571				   struct ocfs2_dir_lookup_result *lookup)
1572{
1573	int max_rec_len;
1574	struct ocfs2_dir_block_trailer *trailer;
1575
1576	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1577	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1578	if (max_rec_len) {
1579		/*
1580		 * There's still room in this block, so no need to remove it
1581		 * from the free list. In this case, we just want to update
1582		 * the rec len accounting.
1583		 */
1584		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1585		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1586		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1587	} else {
1588		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1589	}
1590}
1591
1592/* We don't always have a dentry for what we want to add, so callers
1593 * like the orphan dir code can use this instead.
1594 *
1595 * The lookup context must have been filled from
1596 * ocfs2_prepare_dir_for_insert.
1597 */
1598int __ocfs2_add_entry(handle_t *handle,
1599		      struct inode *dir,
1600		      const char *name, int namelen,
1601		      struct inode *inode, u64 blkno,
1602		      struct buffer_head *parent_fe_bh,
1603		      struct ocfs2_dir_lookup_result *lookup)
1604{
1605	unsigned long offset;
1606	unsigned short rec_len;
1607	struct ocfs2_dir_entry *de, *de1;
1608	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1609	struct super_block *sb = dir->i_sb;
1610	int retval;
1611	unsigned int size = sb->s_blocksize;
1612	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1613	char *data_start = insert_bh->b_data;
1614
1615	if (!namelen)
1616		return -EINVAL;
1617
1618	if (ocfs2_dir_indexed(dir)) {
1619		struct buffer_head *bh;
1620
1621		/*
1622		 * An indexed dir may require that we update the free space
1623		 * list. Reserve a write to the previous node in the list so
1624		 * that we don't fail later.
1625		 *
1626		 * XXX: This can be either a dx_root_block, or an unindexed
1627		 * directory tree leaf block.
1628		 */
1629		if (ocfs2_free_list_at_root(lookup)) {
1630			bh = lookup->dl_dx_root_bh;
1631			retval = ocfs2_journal_access_dr(handle,
1632						 INODE_CACHE(dir), bh,
1633						 OCFS2_JOURNAL_ACCESS_WRITE);
1634		} else {
1635			bh = lookup->dl_prev_leaf_bh;
1636			retval = ocfs2_journal_access_db(handle,
1637						 INODE_CACHE(dir), bh,
1638						 OCFS2_JOURNAL_ACCESS_WRITE);
1639		}
1640		if (retval) {
1641			mlog_errno(retval);
1642			return retval;
1643		}
1644	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1645		data_start = di->id2.i_data.id_data;
1646		size = i_size_read(dir);
1647
1648		BUG_ON(insert_bh != parent_fe_bh);
1649	}
1650
1651	rec_len = OCFS2_DIR_REC_LEN(namelen);
1652	offset = 0;
1653	de = (struct ocfs2_dir_entry *) data_start;
1654	while (1) {
1655		BUG_ON((char *)de >= (size + data_start));
1656
1657		/* These checks should've already been passed by the
1658		 * prepare function, but I guess we can leave them
1659		 * here anyway. */
1660		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
1661			retval = -ENOENT;
1662			goto bail;
1663		}
1664		if (ocfs2_match(namelen, name, de)) {
1665			retval = -EEXIST;
1666			goto bail;
1667		}
1668
1669		/* We're guaranteed that we should have space, so we
1670		 * can't possibly have hit the trailer...right? */
1671		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1672				"Hit dir trailer trying to insert %.*s "
1673			        "(namelen %d) into directory %llu.  "
1674				"offset is %lu, trailer offset is %d\n",
1675				namelen, name, namelen,
1676				(unsigned long long)parent_fe_bh->b_blocknr,
1677				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1678
1679		if (ocfs2_dirent_would_fit(de, rec_len)) {
1680			dir->i_mtime = dir->i_ctime = current_time(dir);
1681			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1682			if (retval < 0) {
1683				mlog_errno(retval);
1684				goto bail;
1685			}
1686
1687			if (insert_bh == parent_fe_bh)
1688				retval = ocfs2_journal_access_di(handle,
1689								 INODE_CACHE(dir),
1690								 insert_bh,
1691								 OCFS2_JOURNAL_ACCESS_WRITE);
1692			else {
1693				retval = ocfs2_journal_access_db(handle,
1694								 INODE_CACHE(dir),
1695								 insert_bh,
1696					      OCFS2_JOURNAL_ACCESS_WRITE);
1697
1698				if (!retval && ocfs2_dir_indexed(dir))
1699					retval = ocfs2_dx_dir_insert(dir,
1700								handle,
1701								lookup);
1702			}
1703
1704			if (retval) {
1705				mlog_errno(retval);
1706				goto bail;
1707			}
1708
1709			/* By now the buffer is marked for journaling */
1710			offset += le16_to_cpu(de->rec_len);
1711			if (le64_to_cpu(de->inode)) {
1712				de1 = (struct ocfs2_dir_entry *)((char *) de +
1713					OCFS2_DIR_REC_LEN(de->name_len));
1714				de1->rec_len =
1715					cpu_to_le16(le16_to_cpu(de->rec_len) -
1716					OCFS2_DIR_REC_LEN(de->name_len));
1717				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1718				de = de1;
1719			}
1720			de->file_type = OCFS2_FT_UNKNOWN;
1721			if (blkno) {
1722				de->inode = cpu_to_le64(blkno);
1723				ocfs2_set_de_type(de, inode->i_mode);
1724			} else
1725				de->inode = 0;
1726			de->name_len = namelen;
1727			memcpy(de->name, name, namelen);
1728
1729			if (ocfs2_dir_indexed(dir))
1730				ocfs2_recalc_free_list(dir, handle, lookup);
1731
1732			dir->i_version++;
1733			ocfs2_journal_dirty(handle, insert_bh);
1734			retval = 0;
1735			goto bail;
1736		}
1737
1738		offset += le16_to_cpu(de->rec_len);
1739		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1740	}
1741
1742	/* when you think about it, the assert above should prevent us
1743	 * from ever getting here. */
1744	retval = -ENOSPC;
1745bail:
1746	if (retval)
1747		mlog_errno(retval);
1748
1749	return retval;
1750}
1751
1752static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1753				    u64 *f_version,
1754				    struct dir_context *ctx)
1755{
1756	int ret, i;
1757	unsigned long offset = ctx->pos;
1758	struct buffer_head *di_bh = NULL;
1759	struct ocfs2_dinode *di;
1760	struct ocfs2_inline_data *data;
1761	struct ocfs2_dir_entry *de;
1762
1763	ret = ocfs2_read_inode_block(inode, &di_bh);
1764	if (ret) {
1765		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1766		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1767		goto out;
1768	}
1769
1770	di = (struct ocfs2_dinode *)di_bh->b_data;
1771	data = &di->id2.i_data;
1772
1773	while (ctx->pos < i_size_read(inode)) {
1774		/* If the dir block has changed since the last call to
1775		 * readdir(2), then we might be pointing to an invalid
1776		 * dirent right now.  Scan from the start of the block
1777		 * to make sure. */
1778		if (*f_version != inode->i_version) {
1779			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1780				de = (struct ocfs2_dir_entry *)
1781					(data->id_data + i);
1782				/* It's too expensive to do a full
1783				 * dirent test each time round this
1784				 * loop, but we do have to test at
1785				 * least that it is non-zero.  A
1786				 * failure will be detected in the
1787				 * dirent test below. */
1788				if (le16_to_cpu(de->rec_len) <
1789				    OCFS2_DIR_REC_LEN(1))
1790					break;
1791				i += le16_to_cpu(de->rec_len);
1792			}
1793			ctx->pos = offset = i;
1794			*f_version = inode->i_version;
1795		}
1796
1797		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1798		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
1799			/* On error, skip the f_pos to the end. */
1800			ctx->pos = i_size_read(inode);
1801			break;
1802		}
1803		offset += le16_to_cpu(de->rec_len);
1804		if (le64_to_cpu(de->inode)) {
1805			unsigned char d_type = DT_UNKNOWN;
1806
1807			if (de->file_type < OCFS2_FT_MAX)
1808				d_type = ocfs2_filetype_table[de->file_type];
1809
1810			if (!dir_emit(ctx, de->name, de->name_len,
1811				      le64_to_cpu(de->inode), d_type))
1812				goto out;
1813		}
1814		ctx->pos += le16_to_cpu(de->rec_len);
1815	}
1816out:
1817	brelse(di_bh);
1818	return 0;
1819}
1820
1821/*
1822 * NOTE: This function can be called against unindexed directories,
1823 * and indexed ones.
1824 */
1825static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1826				    u64 *f_version,
1827				    struct dir_context *ctx,
1828				    bool persist)
1829{
1830	unsigned long offset, blk, last_ra_blk = 0;
1831	int i;
1832	struct buffer_head * bh, * tmp;
1833	struct ocfs2_dir_entry * de;
1834	struct super_block * sb = inode->i_sb;
1835	unsigned int ra_sectors = 16;
1836	int stored = 0;
1837
1838	bh = NULL;
1839
1840	offset = ctx->pos & (sb->s_blocksize - 1);
1841
1842	while (ctx->pos < i_size_read(inode)) {
1843		blk = ctx->pos >> sb->s_blocksize_bits;
1844		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1845			/* Skip the corrupt dirblock and keep trying */
1846			ctx->pos += sb->s_blocksize - offset;
1847			continue;
1848		}
1849
1850		/* The idea here is to begin with 8k read-ahead and to stay
1851		 * 4k ahead of our current position.
1852		 *
1853		 * TODO: Use the pagecache for this. We just need to
1854		 * make sure it's cluster-safe... */
1855		if (!last_ra_blk
1856		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1857			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1858			     i > 0; i--) {
1859				tmp = NULL;
1860				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1861							  OCFS2_BH_READAHEAD))
1862					brelse(tmp);
1863			}
1864			last_ra_blk = blk;
1865			ra_sectors = 8;
1866		}
1867
1868		/* If the dir block has changed since the last call to
1869		 * readdir(2), then we might be pointing to an invalid
1870		 * dirent right now.  Scan from the start of the block
1871		 * to make sure. */
1872		if (*f_version != inode->i_version) {
1873			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1874				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1875				/* It's too expensive to do a full
1876				 * dirent test each time round this
1877				 * loop, but we do have to test at
1878				 * least that it is non-zero.  A
1879				 * failure will be detected in the
1880				 * dirent test below. */
1881				if (le16_to_cpu(de->rec_len) <
1882				    OCFS2_DIR_REC_LEN(1))
1883					break;
1884				i += le16_to_cpu(de->rec_len);
1885			}
1886			offset = i;
1887			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1888				| offset;
1889			*f_version = inode->i_version;
1890		}
1891
1892		while (ctx->pos < i_size_read(inode)
1893		       && offset < sb->s_blocksize) {
1894			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1895			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
1896				/* On error, skip the f_pos to the
1897				   next block. */
1898				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1899				brelse(bh);
1900				continue;
1901			}
1902			if (le64_to_cpu(de->inode)) {
1903				unsigned char d_type = DT_UNKNOWN;
1904
1905				if (de->file_type < OCFS2_FT_MAX)
1906					d_type = ocfs2_filetype_table[de->file_type];
1907				if (!dir_emit(ctx, de->name,
1908						de->name_len,
1909						le64_to_cpu(de->inode),
1910						d_type)) {
1911					brelse(bh);
1912					return 0;
1913				}
1914				stored++;
1915			}
1916			offset += le16_to_cpu(de->rec_len);
1917			ctx->pos += le16_to_cpu(de->rec_len);
1918		}
1919		offset = 0;
1920		brelse(bh);
1921		bh = NULL;
1922		if (!persist && stored)
1923			break;
1924	}
1925	return 0;
1926}
1927
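/*
 * Dispatch to the inline-data or extent-list readdir implementation
 * depending on how this directory's data is currently stored.
 */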
1928static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1929				 struct dir_context *ctx,
1930				 bool persist)
1931{
1932	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1933		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1934	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1935}
1936
1937/*
1938 * This is intended to be called from inside other kernel functions,
1939 * so we fake some arguments.
1940 */
1941int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1942{
1943	u64 version = inode->i_version;
1944	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1945	return 0;
1946}
1947
1948/*
1949 * ocfs2_readdir() - readdir entry point; takes a cluster lock on the
1950 * directory before iterating its entries.
1951 */
1952int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1953{
1954	int error = 0;
1955	struct inode *inode = file_inode(file);
1956	int lock_level = 0;
1957
1958	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1959
1960	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
1961	if (lock_level && error >= 0) {
1962		/* Release the EX lock that was used to update atime,
1963		 * and take a PR lock again to reduce contention
1964		 * on commonly accessed directories. */
1965		ocfs2_inode_unlock(inode, 1);
1966		lock_level = 0;
1967		error = ocfs2_inode_lock(inode, NULL, 0);
1968	}
1969	if (error < 0) {
1970		if (error != -ENOENT)
1971			mlog_errno(error);
1972		/* we haven't emitted any entries yet, so propagate the error. */
1973		goto bail_nolock;
1974	}
1975
1976	error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
1977
1978	ocfs2_inode_unlock(inode, lock_level);
1979	if (error)
1980		mlog_errno(error);
1981
1982bail_nolock:
1983
1984	return error;
1985}
1986
1987/*
1988 * NOTE: this should always be called with parent dir i_mutex taken.
1989 */
1990int ocfs2_find_files_on_disk(const char *name,
1991			     int namelen,
1992			     u64 *blkno,
1993			     struct inode *inode,
1994			     struct ocfs2_dir_lookup_result *lookup)
1995{
1996	int status = -ENOENT;
1997
1998	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1999				(unsigned long long)OCFS2_I(inode)->ip_blkno);
2000
2001	status = ocfs2_find_entry(name, namelen, inode, lookup);
2002	if (status)
2003		goto leave;
2004
2005	*blkno = le64_to_cpu(lookup->dl_entry->inode);
2006
2007	status = 0;
2008leave:
2009
2010	return status;
2011}
2012
2013/*
2014 * Convenience function for callers which just want the block number
2015 * mapped to a name and don't require the full dirent info, etc.
2016 */
2017int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
2018			       int namelen, u64 *blkno)
2019{
2020	int ret;
2021	struct ocfs2_dir_lookup_result lookup = { NULL, };
2022
2023	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
2024	ocfs2_free_dir_lookup_result(&lookup);
2025
2026	return ret;
2027}
2028
2029/* Check for a name within a directory.
2030 *
2031 * Return 0 if the name does not exist
2032 * Return -EEXIST if the directory contains the name
2033 *
2034 * Callers should have i_mutex + a cluster lock on dir
2035 */
2036int ocfs2_check_dir_for_entry(struct inode *dir,
2037			      const char *name,
2038			      int namelen)
2039{
2040	int ret = 0;
2041	struct ocfs2_dir_lookup_result lookup = { NULL, };
2042
2043	trace_ocfs2_check_dir_for_entry(
2044		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2045
2046	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
2047		ret = -EEXIST;
2048		mlog_errno(ret);
2049	}
2050
2051	ocfs2_free_dir_lookup_result(&lookup);
2052
2053	return ret;
2054}
2055
2056struct ocfs2_empty_dir_priv {
2057	struct dir_context ctx;
2058	unsigned seen_dot;
2059	unsigned seen_dot_dot;
2060	unsigned seen_other;
2061	unsigned dx_dir;
2062};
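
/*
 * dir_context actor for ocfs2_empty_dir(). Records whether '.', '..'
 * or any other entry has been seen; returning nonzero stops the walk.
 */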
2063static int ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2064				   int name_len, loff_t pos, u64 ino,
2065				   unsigned type)
2066{
2067	struct ocfs2_empty_dir_priv *p =
2068		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2069
2070	/*
2071	 * Check the positions of "." and ".." records to be sure
2072	 * they're in the correct place.
2073	 *
2074	 * Indexed directories don't need to proceed past the first
2075	 * two entries, so we end the scan after seeing '..'. Despite
2076	 * that, we allow the scan to proceed in the event that we
2077	 * have a corrupted indexed directory (no dot or dot dot
2078	 * entries). This allows us to double check for existing
2079	 * entries which might not have been found in the index.
2080	 */
2081	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2082		p->seen_dot = 1;
2083		return 0;
2084	}
2085
2086	if (name_len == 2 && !strncmp("..", name, 2) &&
2087	    pos == OCFS2_DIR_REC_LEN(1)) {
2088		p->seen_dot_dot = 1;
2089
2090		if (p->dx_dir && p->seen_dot)
2091			return 1;
2092
2093		return 0;
2094	}
2095
2096	p->seen_other = 1;
2097	return 1;
2098}
2099
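/*
 * Fast check for indexed directories: the dx root records the number
 * of entries, so anything other than exactly two ("." and "..") means
 * the directory is not empty.
 */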
2100static int ocfs2_empty_dir_dx(struct inode *inode,
2101			      struct ocfs2_empty_dir_priv *priv)
2102{
2103	int ret;
2104	struct buffer_head *di_bh = NULL;
2105	struct buffer_head *dx_root_bh = NULL;
2106	struct ocfs2_dinode *di;
2107	struct ocfs2_dx_root_block *dx_root;
2108
2109	priv->dx_dir = 1;
2110
2111	ret = ocfs2_read_inode_block(inode, &di_bh);
2112	if (ret) {
2113		mlog_errno(ret);
2114		goto out;
2115	}
2116	di = (struct ocfs2_dinode *)di_bh->b_data;
2117
2118	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2119	if (ret) {
2120		mlog_errno(ret);
2121		goto out;
2122	}
2123	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2124
2125	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2126		priv->seen_other = 1;
2127
2128out:
2129	brelse(di_bh);
2130	brelse(dx_root_bh);
2131	return ret;
2132}
2133
2134/*
2135 * routine to check that the specified directory is empty (for rmdir)
2136 *
2137 * Returns 1 if dir is empty, zero otherwise.
2138 *
2139 * XXX: This is a performance problem for unindexed directories.
2140 */
2141int ocfs2_empty_dir(struct inode *inode)
2142{
2143	int ret;
2144	struct ocfs2_empty_dir_priv priv = {
2145		.ctx.actor = ocfs2_empty_dir_filldir,
2146	};
2147
2148	if (ocfs2_dir_indexed(inode)) {
2149		ret = ocfs2_empty_dir_dx(inode, &priv);
2150		if (ret)
2151			mlog_errno(ret);
2152		/*
2153		 * We still run ocfs2_dir_foreach to get the checks
2154		 * for "." and "..".
2155		 */
2156	}
2157
2158	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2159	if (ret)
2160		mlog_errno(ret);
2161
2162	if (!priv.seen_dot || !priv.seen_dot_dot) {
2163		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2164		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2165		/*
2166		 * XXX: Is it really safe to allow an unlink to continue?
2167		 */
2168		return 1;
2169	}
2170
2171	return !priv.seen_other;
2172}
2173
2174/*
2175 * Fills in the "." and ".." dirents in a new directory block. Returns the
2176 * dirent for "..", which might be used during creation of a directory with
2177 * a trailing header. It is otherwise safe to ignore the return value.
2178 */
2179static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2180							  struct inode *parent,
2181							  char *start,
2182							  unsigned int size)
2183{
2184	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2185
2186	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2187	de->name_len = 1;
2188	de->rec_len =
2189		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2190	strcpy(de->name, ".");
2191	ocfs2_set_de_type(de, S_IFDIR);
2192
2193	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2194	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2195	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2196	de->name_len = 2;
2197	strcpy(de->name, "..");
2198	ocfs2_set_de_type(de, S_IFDIR);
2199
2200	return de;
2201}
2202
2203/*
2204 * This works together with code in ocfs2_mknod_locked() which sets
2205 * the inline-data flag and initializes the inline-data section.
2206 */
2207static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2208				 handle_t *handle,
2209				 struct inode *parent,
2210				 struct inode *inode,
2211				 struct buffer_head *di_bh)
2212{
2213	int ret;
2214	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2215	struct ocfs2_inline_data *data = &di->id2.i_data;
2216	unsigned int size = le16_to_cpu(data->id_count);
2217
2218	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2219				      OCFS2_JOURNAL_ACCESS_WRITE);
2220	if (ret) {
2221		mlog_errno(ret);
2222		goto out;
2223	}
2224
2225	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2226	ocfs2_journal_dirty(handle, di_bh);
2227
2228	i_size_write(inode, size);
2229	set_nlink(inode, 2);
2230	inode->i_blocks = ocfs2_inode_sector_count(inode);
2231
2232	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2233	if (ret < 0)
2234		mlog_errno(ret);
2235
2236out:
2237	return ret;
2238}
2239
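/*
 * Initialize the first data block of a new extent-based directory:
 * extend the directory by one block, fill in the "." and ".." dirents,
 * optionally add the block trailer, and set i_size/i_nlink accordingly.
 */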
2240static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2241				 handle_t *handle,
2242				 struct inode *parent,
2243				 struct inode *inode,
2244				 struct buffer_head *fe_bh,
2245				 struct ocfs2_alloc_context *data_ac,
2246				 struct buffer_head **ret_new_bh)
2247{
2248	int status;
2249	unsigned int size = osb->sb->s_blocksize;
2250	struct buffer_head *new_bh = NULL;
2251	struct ocfs2_dir_entry *de;
2252
2253	if (ocfs2_new_dir_wants_trailer(inode))
2254		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2255
2256	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2257				     data_ac, NULL, &new_bh);
2258	if (status < 0) {
2259		mlog_errno(status);
2260		goto bail;
2261	}
2262
2263	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2264
2265	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2266					 OCFS2_JOURNAL_ACCESS_CREATE);
2267	if (status < 0) {
2268		mlog_errno(status);
2269		goto bail;
2270	}
2271	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2272
2273	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2274	if (ocfs2_new_dir_wants_trailer(inode)) {
2275		int size = le16_to_cpu(de->rec_len);
2276
2277		/*
2278		 * Figure out the size of the hole left over after
2279		 * insertion of '.' and '..'. The trailer wants this
2280		 * information.
2281		 */
2282		size -= OCFS2_DIR_REC_LEN(2);
2283		size -= sizeof(struct ocfs2_dir_block_trailer);
2284
2285		ocfs2_init_dir_trailer(inode, new_bh, size);
2286	}
2287
2288	ocfs2_journal_dirty(handle, new_bh);
2289
2290	i_size_write(inode, inode->i_sb->s_blocksize);
2291	set_nlink(inode, 2);
2292	inode->i_blocks = ocfs2_inode_sector_count(inode);
2293	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2294	if (status < 0) {
2295		mlog_errno(status);
2296		goto bail;
2297	}
2298
2299	status = 0;
2300	if (ret_new_bh) {
2301		*ret_new_bh = new_bh;
2302		new_bh = NULL;
2303	}
2304bail:
2305	brelse(new_bh);
2306
2307	return status;
2308}
2309
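/*
 * Allocate and initialize a dx root block for 'dir' and link it into
 * the inode (i_dx_root plus OCFS2_INDEXED_DIR_FL). Depending on
 * dx_inline, the root is set up with either an inline entry list or an
 * extent list for external index leaves. If dirdata_bh has free space
 * recorded in its trailer, it becomes the head of the directory's
 * free-space list (dr_free_blk).
 */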
2310static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2311				     handle_t *handle, struct inode *dir,
2312				     struct buffer_head *di_bh,
2313				     struct buffer_head *dirdata_bh,
2314				     struct ocfs2_alloc_context *meta_ac,
2315				     int dx_inline, u32 num_entries,
2316				     struct buffer_head **ret_dx_root_bh)
2317{
2318	int ret;
2319	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2320	u16 dr_suballoc_bit;
2321	u64 suballoc_loc, dr_blkno;
2322	unsigned int num_bits;
2323	struct buffer_head *dx_root_bh = NULL;
2324	struct ocfs2_dx_root_block *dx_root;
2325	struct ocfs2_dir_block_trailer *trailer =
2326		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2327
2328	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2329				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2330	if (ret) {
2331		mlog_errno(ret);
2332		goto out;
2333	}
2334
2335	trace_ocfs2_dx_dir_attach_index(
2336				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2337				(unsigned long long)dr_blkno);
2338
2339	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2340	if (dx_root_bh == NULL) {
2341		ret = -ENOMEM;
2342		goto out;
2343	}
2344	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2345
2346	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2347				      OCFS2_JOURNAL_ACCESS_CREATE);
2348	if (ret < 0) {
2349		mlog_errno(ret);
2350		goto out;
2351	}
2352
2353	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2354	memset(dx_root, 0, osb->sb->s_blocksize);
2355	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2356	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2357	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2358	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2359	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2360	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2361	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2362	dx_root->dr_num_entries = cpu_to_le32(num_entries);
2363	if (le16_to_cpu(trailer->db_free_rec_len))
2364		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2365	else
2366		dx_root->dr_free_blk = cpu_to_le64(0);
2367
2368	if (dx_inline) {
2369		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2370		dx_root->dr_entries.de_count =
2371			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2372	} else {
2373		dx_root->dr_list.l_count =
2374			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2375	}
2376	ocfs2_journal_dirty(handle, dx_root_bh);
2377
2378	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2379				      OCFS2_JOURNAL_ACCESS_CREATE);
2380	if (ret) {
2381		mlog_errno(ret);
2382		goto out;
2383	}
2384
2385	di->i_dx_root = cpu_to_le64(dr_blkno);
2386
2387	spin_lock(&OCFS2_I(dir)->ip_lock);
2388	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2389	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2390	spin_unlock(&OCFS2_I(dir)->ip_lock);
2391
2392	ocfs2_journal_dirty(handle, di_bh);
2393
2394	*ret_dx_root_bh = dx_root_bh;
2395	dx_root_bh = NULL;
2396
2397out:
2398	brelse(dx_root_bh);
2399	return ret;
2400}
2401
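/*
 * Initialize every block in a newly allocated index cluster as an
 * empty dx leaf, journaling each buffer as we go.
 */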
2402static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2403				       handle_t *handle, struct inode *dir,
2404				       struct buffer_head **dx_leaves,
2405				       int num_dx_leaves, u64 start_blk)
2406{
2407	int ret, i;
2408	struct ocfs2_dx_leaf *dx_leaf;
2409	struct buffer_head *bh;
2410
2411	for (i = 0; i < num_dx_leaves; i++) {
2412		bh = sb_getblk(osb->sb, start_blk + i);
2413		if (bh == NULL) {
2414			ret = -ENOMEM;
2415			goto out;
2416		}
2417		dx_leaves[i] = bh;
2418
2419		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2420
2421		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2422					      OCFS2_JOURNAL_ACCESS_CREATE);
2423		if (ret < 0) {
2424			mlog_errno(ret);
2425			goto out;
2426		}
2427
2428		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2429
2430		memset(dx_leaf, 0, osb->sb->s_blocksize);
2431		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2432		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2433		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2434		dx_leaf->dl_list.de_count =
2435			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2436
2437		trace_ocfs2_dx_dir_format_cluster(
2438				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2439				(unsigned long long)bh->b_blocknr,
2440				le16_to_cpu(dx_leaf->dl_list.de_count));
2441
2442		ocfs2_journal_dirty(handle, bh);
2443	}
2444
2445	ret = 0;
2446out:
2447	return ret;
2448}
2449
2450/*
2451 * Allocates and formats a new cluster for use in an indexed dir
2452 * leaf. This version will not do the extent insert, so that it can be
2453 * used by operations which need careful ordering.
2454 */
2455static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2456				      u32 cpos, handle_t *handle,
2457				      struct ocfs2_alloc_context *data_ac,
2458				      struct buffer_head **dx_leaves,
2459				      int num_dx_leaves, u64 *ret_phys_blkno)
2460{
2461	int ret;
2462	u32 phys, num;
2463	u64 phys_blkno;
2464	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2465
2466	/*
2467	 * XXX: For create, this should claim a cluster for the index
2468	 * *before* the unindexed insert so that we have a better
2469	 * chance of contiguousness as the directory grows in number
2470	 * of entries.
2471	 */
2472	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2473	if (ret) {
2474		mlog_errno(ret);
2475		goto out;
2476	}
2477
2478	/*
2479	 * Format the new cluster first. That way, we're inserting
2480	 * valid data.
2481	 */
2482	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2483	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2484					  num_dx_leaves, phys_blkno);
2485	if (ret) {
2486		mlog_errno(ret);
2487		goto out;
2488	}
2489
2490	*ret_phys_blkno = phys_blkno;
2491out:
2492	return ret;
2493}
2494
2495static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2496				    struct ocfs2_extent_tree *et,
2497				    u32 cpos, handle_t *handle,
2498				    struct ocfs2_alloc_context *data_ac,
2499				    struct ocfs2_alloc_context *meta_ac,
2500				    struct buffer_head **dx_leaves,
2501				    int num_dx_leaves)
2502{
2503	int ret;
2504	u64 phys_blkno;
2505
2506	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2507					 num_dx_leaves, &phys_blkno);
2508	if (ret) {
2509		mlog_errno(ret);
2510		goto out;
2511	}
2512
2513	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2514				  meta_ac);
2515	if (ret)
2516		mlog_errno(ret);
2517out:
2518	return ret;
2519}
2520
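/*
 * Allocate an array of buffer_head pointers big enough to hold one
 * cluster's worth of dx leaf blocks. The caller is responsible for
 * freeing the array and releasing any buffers attached to it.
 */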
2521static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2522							int *ret_num_leaves)
2523{
2524	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2525	struct buffer_head **dx_leaves;
2526
2527	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2528			    GFP_NOFS);
2529	if (dx_leaves && ret_num_leaves)
2530		*ret_num_leaves = num_dx_leaves;
2531
2532	return dx_leaves;
2533}
2534
2535static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2536				 handle_t *handle,
2537				 struct inode *parent,
2538				 struct inode *inode,
2539				 struct buffer_head *di_bh,
2540				 struct ocfs2_alloc_context *data_ac,
2541				 struct ocfs2_alloc_context *meta_ac)
2542{
2543	int ret;
2544	struct buffer_head *leaf_bh = NULL;
2545	struct buffer_head *dx_root_bh = NULL;
2546	struct ocfs2_dx_hinfo hinfo;
2547	struct ocfs2_dx_root_block *dx_root;
2548	struct ocfs2_dx_entry_list *entry_list;
2549
2550	/*
2551	 * Our strategy is to create the directory as though it were
2552	 * unindexed, then add the index block. This works with very
2553	 * little complication since the state of a new directory is a
2554	 * very well known quantity.
2555	 *
2556	 * Essentially, we have two dirents ("." and "..") in the 1st
2557	 * block which need indexing. These are easily inserted into
2558	 * the index block.
2559	 */
2560
2561	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2562				    data_ac, &leaf_bh);
2563	if (ret) {
2564		mlog_errno(ret);
2565		goto out;
2566	}
2567
2568	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2569					meta_ac, 1, 2, &dx_root_bh);
2570	if (ret) {
2571		mlog_errno(ret);
2572		goto out;
2573	}
2574	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2575	entry_list = &dx_root->dr_entries;
2576
2577	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2578	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2579	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2580
2581	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2582	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2583
2584out:
2585	brelse(dx_root_bh);
2586	brelse(leaf_bh);
2587	return ret;
2588}
2589
2590int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2591		       handle_t *handle,
2592		       struct inode *parent,
2593		       struct inode *inode,
2594		       struct buffer_head *fe_bh,
2595		       struct ocfs2_alloc_context *data_ac,
2596		       struct ocfs2_alloc_context *meta_ac)
2597
2598{
2599	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2600
2601	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2602		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2603
2604	if (ocfs2_supports_indexed_dirs(osb))
2605		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2606					     data_ac, meta_ac);
2607
2608	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2609				     data_ac, NULL);
2610}
2611
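/*
 * Index every in-use dirent from dirent_bh into the external dx leaves
 * covering one cluster. Each name is hashed and inserted into the leaf
 * selected by its hash; *num_dx_entries is bumped for every entry added.
 */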
2612static int ocfs2_dx_dir_index_block(struct inode *dir,
2613				    handle_t *handle,
2614				    struct buffer_head **dx_leaves,
2615				    int num_dx_leaves,
2616				    u32 *num_dx_entries,
2617				    struct buffer_head *dirent_bh)
2618{
2619	int ret = 0, namelen, i;
2620	char *de_buf, *limit;
2621	struct ocfs2_dir_entry *de;
2622	struct buffer_head *dx_leaf_bh;
2623	struct ocfs2_dx_hinfo hinfo;
2624	u64 dirent_blk = dirent_bh->b_blocknr;
2625
2626	de_buf = dirent_bh->b_data;
2627	limit = de_buf + dir->i_sb->s_blocksize;
2628
2629	while (de_buf < limit) {
2630		de = (struct ocfs2_dir_entry *)de_buf;
2631
2632		namelen = de->name_len;
2633		if (!namelen || !de->inode)
2634			goto inc;
2635
2636		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2637
2638		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2639		dx_leaf_bh = dx_leaves[i];
2640
2641		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2642						 dirent_blk, dx_leaf_bh);
2643		if (ret) {
2644			mlog_errno(ret);
2645			goto out;
2646		}
2647
2648		*num_dx_entries = *num_dx_entries + 1;
2649
2650inc:
2651		de_buf += le16_to_cpu(de->rec_len);
2652	}
2653
2654out:
2655	return ret;
2656}
2657
2658/*
2659 * XXX: This expects dx_root_bh to already be part of the transaction.
2660 */
2661static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2662					 struct buffer_head *dx_root_bh,
2663					 struct buffer_head *dirent_bh)
2664{
2665	char *de_buf, *limit;
2666	struct ocfs2_dx_root_block *dx_root;
2667	struct ocfs2_dir_entry *de;
2668	struct ocfs2_dx_hinfo hinfo;
2669	u64 dirent_blk = dirent_bh->b_blocknr;
2670
2671	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2672
2673	de_buf = dirent_bh->b_data;
2674	limit = de_buf + dir->i_sb->s_blocksize;
2675
2676	while (de_buf < limit) {
2677		de = (struct ocfs2_dir_entry *)de_buf;
2678
2679		if (!de->name_len || !de->inode)
2680			goto inc;
2681
2682		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2683
2684		trace_ocfs2_dx_dir_index_root_block(
2685				(unsigned long long)dir->i_ino,
2686				hinfo.major_hash, hinfo.minor_hash,
2687				de->name_len, de->name,
2688				le16_to_cpu(dx_root->dr_entries.de_num_used));
2689
2690		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2691					   dirent_blk);
2692
2693		le32_add_cpu(&dx_root->dr_num_entries, 1);
2694inc:
2695		de_buf += le16_to_cpu(de->rec_len);
2696	}
2697}
2698
2699/*
2700 * Count the number of inline directory entries in di_bh and compare
2701 * them against the number of entries we can hold in an inline dx root
2702 * block.
2703 */
2704static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2705					 struct buffer_head *di_bh)
2706{
2707	int dirent_count = 0;
2708	char *de_buf, *limit;
2709	struct ocfs2_dir_entry *de;
2710	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2711
2712	de_buf = di->id2.i_data.id_data;
2713	limit = de_buf + i_size_read(dir);
2714
2715	while (de_buf < limit) {
2716		de = (struct ocfs2_dir_entry *)de_buf;
2717
2718		if (de->name_len && de->inode)
2719			dirent_count++;
2720
2721		de_buf += le16_to_cpu(de->rec_len);
2722	}
2723
2724	/* We are careful to leave room for one extra record. */
2725	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2726}
2727
2728/*
2729 * Expand rec_len of the rightmost dirent in a directory block so that it
2730 * contains the end of our valid space for dirents. We do this during
2731 * expansion from an inline directory to one with extents. The first dir block
2732 * in that case is taken from the inline data portion of the inode block.
2733 *
2734 * This will also return the largest amount of contiguous space for a dirent
2735 * in the block. That value is *not* necessarily the last dirent, even after
2736 * expansion. The directory indexing code wants this value for free space
2737 * accounting. We do this here since we're already walking the entire dir
2738 * block.
2739 *
2740 * We add the dir trailer if this filesystem wants it.
2741 */
2742static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2743					     struct inode *dir)
2744{
2745	struct super_block *sb = dir->i_sb;
2746	struct ocfs2_dir_entry *de;
2747	struct ocfs2_dir_entry *prev_de;
2748	char *de_buf, *limit;
2749	unsigned int new_size = sb->s_blocksize;
2750	unsigned int bytes, this_hole;
2751	unsigned int largest_hole = 0;
2752
2753	if (ocfs2_new_dir_wants_trailer(dir))
2754		new_size = ocfs2_dir_trailer_blk_off(sb);
2755
2756	bytes = new_size - old_size;
2757
2758	limit = start + old_size;
2759	de_buf = start;
2760	de = (struct ocfs2_dir_entry *)de_buf;
2761	do {
2762		this_hole = ocfs2_figure_dirent_hole(de);
2763		if (this_hole > largest_hole)
2764			largest_hole = this_hole;
2765
2766		prev_de = de;
2767		de_buf += le16_to_cpu(de->rec_len);
2768		de = (struct ocfs2_dir_entry *)de_buf;
2769	} while (de_buf < limit);
2770
2771	le16_add_cpu(&prev_de->rec_len, bytes);
2772
2773	/* We need to double check this after modification of the final
2774	 * dirent. */
2775	this_hole = ocfs2_figure_dirent_hole(prev_de);
2776	if (this_hole > largest_hole)
2777		largest_hole = this_hole;
2778
2779	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2780		return largest_hole;
2781	return 0;
2782}
2783
2784/*
2785 * We allocate enough clusters to fulfill "blocks_wanted", but set
2786 * i_size to exactly one block. ocfs2_extend_dir() will handle the
2787 * rest automatically for us.
2788 *
2789 * *first_block_bh is a pointer to the 1st data block allocated to the
2790 *  directory.
2791 */
2792static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2793				   unsigned int blocks_wanted,
2794				   struct ocfs2_dir_lookup_result *lookup,
2795				   struct buffer_head **first_block_bh)
2796{
2797	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2798	struct super_block *sb = dir->i_sb;
2799	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2800		credits = ocfs2_inline_to_extents_credits(sb);
2801	u64 dx_insert_blkno, blkno,
2802		bytes = blocks_wanted << sb->s_blocksize_bits;
2803	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2804	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2805	struct ocfs2_alloc_context *data_ac = NULL;
2806	struct ocfs2_alloc_context *meta_ac = NULL;
2807	struct buffer_head *dirdata_bh = NULL;
2808	struct buffer_head *dx_root_bh = NULL;
2809	struct buffer_head **dx_leaves = NULL;
2810	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2811	handle_t *handle;
2812	struct ocfs2_extent_tree et;
2813	struct ocfs2_extent_tree dx_et;
2814	int did_quota = 0, bytes_allocated = 0;
2815
2816	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2817
2818	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2819	dx_alloc = 0;
2820
2821	down_write(&oi->ip_alloc_sem);
2822
2823	if (ocfs2_supports_indexed_dirs(osb)) {
2824		credits += ocfs2_add_dir_index_credits(sb);
2825
2826		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2827		if (!dx_inline) {
2828			/* Add one more cluster for an index leaf */
2829			dx_alloc++;
2830			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2831								&num_dx_leaves);
2832			if (!dx_leaves) {
2833				ret = -ENOMEM;
2834				mlog_errno(ret);
2835				goto out;
2836			}
2837		}
2838
2839		/* This gets us the dx_root */
2840		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2841		if (ret) {
2842			mlog_errno(ret);
2843			goto out;
2844		}
2845	}
2846
2847	/*
2848	 * We should never need more than 2 clusters for the unindexed
2849	 * tree - maximum dirent size is far less than one block. In
2850	 * fact, the only time we'd need more than one cluster is if
2851	 * blocksize == clustersize and the dirent won't fit in the
2852	 * extra space that the expansion to a single block gives. As
2853	 * of today, that only happens on 4k/4k file systems.
2854	 */
2855	BUG_ON(alloc > 2);
2856
2857	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2858	if (ret) {
2859		mlog_errno(ret);
2860		goto out;
2861	}
2862
2863	/*
2864	 * Prepare for worst case allocation scenario of two separate
2865	 * extents in the unindexed tree.
2866	 */
2867	if (alloc == 2)
2868		credits += OCFS2_SUBALLOC_ALLOC;
2869
2870	handle = ocfs2_start_trans(osb, credits);
2871	if (IS_ERR(handle)) {
2872		ret = PTR_ERR(handle);
2873		mlog_errno(ret);
2874		goto out;
2875	}
2876
2877	ret = dquot_alloc_space_nodirty(dir,
2878		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2879	if (ret)
2880		goto out_commit;
2881	did_quota = 1;
2882
2883	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2884		/*
2885		 * Allocate our index cluster first, to maximize the
2886		 * possibility that unindexed leaves grow
2887		 * contiguously.
2888		 */
2889		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2890						 dx_leaves, num_dx_leaves,
2891						 &dx_insert_blkno);
2892		if (ret) {
2893			mlog_errno(ret);
2894			goto out_commit;
2895		}
2896		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2897	}
2898
2899	/*
2900	 * Try to claim as many clusters as the bitmap can give, though
2901	 * if we only get one now, that's enough to continue. The rest
2902	 * will be claimed after the conversion to extents.
2903	 */
2904	if (ocfs2_dir_resv_allowed(osb))
2905		data_ac->ac_resv = &oi->ip_la_data_resv;
2906	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2907	if (ret) {
2908		mlog_errno(ret);
2909		goto out_commit;
2910	}
2911	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2912
2913	/*
2914	 * Operations are carefully ordered so that we set up the new
2915	 * data block first. The conversion from inline data to
2916	 * extents follows.
2917	 */
2918	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2919	dirdata_bh = sb_getblk(sb, blkno);
2920	if (!dirdata_bh) {
2921		ret = -ENOMEM;
2922		mlog_errno(ret);
2923		goto out_commit;
2924	}
2925
2926	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2927
2928	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2929				      OCFS2_JOURNAL_ACCESS_CREATE);
2930	if (ret) {
2931		mlog_errno(ret);
2932		goto out_commit;
2933	}
2934
2935	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2936	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2937	       sb->s_blocksize - i_size_read(dir));
2938	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2939	if (ocfs2_new_dir_wants_trailer(dir)) {
2940		/*
2941		 * Prepare the dir trailer up front so that the space it
2942		 * occupies won't look like a valid dirent. Even if inserting
2943		 * the index fails (unlikely), all we'll have done is give the
2944		 * first dir block a small amount of fragmentation.
2945		 */
2946		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2947	}
2948
2949	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2950	ocfs2_journal_dirty(handle, dirdata_bh);
2951
2952	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2953		/*
2954		 * Dx dirs with an external cluster need to do this up
2955		 * front. Inline dx roots get handled later, after
2956		 * we've allocated our root block. We get passed back
2957		 * a total number of items so that dr_num_entries can
2958		 * be correctly set once the dx_root has been
2959		 * allocated.
2960		 */
2961		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2962					       num_dx_leaves, &num_dx_entries,
2963					       dirdata_bh);
2964		if (ret) {
2965			mlog_errno(ret);
2966			goto out_commit;
2967		}
2968	}
2969
2970	/*
2971	 * Set extent, i_size, etc on the directory. After this, the
2972	 * inode should contain the same exact dirents as before and
2973	 * be fully accessible from system calls.
2974	 *
2975	 * We let the later dirent insert modify c/mtime - to the user
2976	 * the data hasn't changed.
2977	 */
2978	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2979				      OCFS2_JOURNAL_ACCESS_CREATE);
2980	if (ret) {
2981		mlog_errno(ret);
2982		goto out_commit;
2983	}
2984
2985	spin_lock(&oi->ip_lock);
2986	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2987	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2988	spin_unlock(&oi->ip_lock);
2989
2990	ocfs2_dinode_new_extent_list(dir, di);
2991
2992	i_size_write(dir, sb->s_blocksize);
2993	dir->i_mtime = dir->i_ctime = current_time(dir);
2994
2995	di->i_size = cpu_to_le64(sb->s_blocksize);
2996	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
2997	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
2998	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2999
3000	/*
3001	 * This should never fail as our extent list is empty and all
3002	 * related blocks have been journaled already.
3003	 */
3004	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
3005				  0, NULL);
3006	if (ret) {
3007		mlog_errno(ret);
3008		goto out_commit;
3009	}
3010
3011	/*
3012	 * Set i_blocks after the extent insert for the most up to
3013	 * date ip_clusters value.
3014	 */
3015	dir->i_blocks = ocfs2_inode_sector_count(dir);
3016
3017	ocfs2_journal_dirty(handle, di_bh);
3018
3019	if (ocfs2_supports_indexed_dirs(osb)) {
3020		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
3021						dirdata_bh, meta_ac, dx_inline,
3022						num_dx_entries, &dx_root_bh);
3023		if (ret) {
3024			mlog_errno(ret);
3025			goto out_commit;
3026		}
3027
3028		if (dx_inline) {
3029			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3030						      dirdata_bh);
3031		} else {
3032			ocfs2_init_dx_root_extent_tree(&dx_et,
3033						       INODE_CACHE(dir),
3034						       dx_root_bh);
3035			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3036						  dx_insert_blkno, 1, 0, NULL);
3037			if (ret)
3038				mlog_errno(ret);
3039		}
3040	}
3041
3042	/*
3043	 * We asked for two clusters, but only got one in the 1st
3044	 * pass. Claim the 2nd cluster as a separate extent.
3045	 */
3046	if (alloc > len) {
3047		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3048					   &len);
3049		if (ret) {
3050			mlog_errno(ret);
3051			goto out_commit;
3052		}
3053		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3054
3055		ret = ocfs2_insert_extent(handle, &et, 1,
3056					  blkno, len, 0, NULL);
3057		if (ret) {
3058			mlog_errno(ret);
3059			goto out_commit;
3060		}
3061		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3062	}
3063
3064	*first_block_bh = dirdata_bh;
3065	dirdata_bh = NULL;
3066	if (ocfs2_supports_indexed_dirs(osb)) {
3067		unsigned int off;
3068
3069		if (!dx_inline) {
3070			/*
3071			 * We need to return the correct block within the
3072			 * cluster which should hold our entry.
3073			 */
3074			off = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb),
3075						    &lookup->dl_hinfo);
3076			get_bh(dx_leaves[off]);
3077			lookup->dl_dx_leaf_bh = dx_leaves[off];
3078		}
3079		lookup->dl_dx_root_bh = dx_root_bh;
3080		dx_root_bh = NULL;
3081	}
3082
3083out_commit:
3084	if (ret < 0 && did_quota)
3085		dquot_free_space_nodirty(dir, bytes_allocated);
3086
3087	ocfs2_commit_trans(osb, handle);
3088
3089out:
3090	up_write(&oi->ip_alloc_sem);
3091	if (data_ac)
3092		ocfs2_free_alloc_context(data_ac);
3093	if (meta_ac)
3094		ocfs2_free_alloc_context(meta_ac);
3095
3096	if (dx_leaves) {
3097		for (i = 0; i < num_dx_leaves; i++)
3098			brelse(dx_leaves[i]);
3099		kfree(dx_leaves);
3100	}
3101
3102	brelse(dirdata_bh);
3103	brelse(dx_root_bh);
3104
3105	return ret;
3106}
3107
3108/* returns a bh of the 1st new block in the allocation. */
3109static int ocfs2_do_extend_dir(struct super_block *sb,
3110			       handle_t *handle,
3111			       struct inode *dir,
3112			       struct buffer_head *parent_fe_bh,
3113			       struct ocfs2_alloc_context *data_ac,
3114			       struct ocfs2_alloc_context *meta_ac,
3115			       struct buffer_head **new_bh)
3116{
3117	int status;
3118	int extend, did_quota = 0;
3119	u64 p_blkno, v_blkno;
3120
3121	spin_lock(&OCFS2_I(dir)->ip_lock);
3122	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3123	spin_unlock(&OCFS2_I(dir)->ip_lock);
3124
3125	if (extend) {
3126		u32 offset = OCFS2_I(dir)->ip_clusters;
3127
3128		status = dquot_alloc_space_nodirty(dir,
3129					ocfs2_clusters_to_bytes(sb, 1));
3130		if (status)
3131			goto bail;
3132		did_quota = 1;
3133
3134		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3135					      1, 0, parent_fe_bh, handle,
3136					      data_ac, meta_ac, NULL);
3137		BUG_ON(status == -EAGAIN);
3138		if (status < 0) {
3139			mlog_errno(status);
3140			goto bail;
3141		}
3142	}
3143
3144	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3145	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3146	if (status < 0) {
3147		mlog_errno(status);
3148		goto bail;
3149	}
3150
3151	*new_bh = sb_getblk(sb, p_blkno);
3152	if (!*new_bh) {
3153		status = -ENOMEM;
3154		mlog_errno(status);
3155		goto bail;
3156	}
3157	status = 0;
3158bail:
3159	if (did_quota && status < 0)
3160		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3161	return status;
3162}
3163
3164/*
3165 * Assumes you already have a cluster lock on the directory.
3166 *
3167 * 'blocks_wanted' is only used if we have an inline directory which
3168 * is to be turned into an extent based one. The size of the dirent to
3169 * insert might be larger than the space gained by growing to just one
3170 * block, so we may have to grow the inode by two blocks in that case.
3171 *
3172 * If the directory is already indexed, dx_root_bh must be provided.
3173 */
3174static int ocfs2_extend_dir(struct ocfs2_super *osb,
3175			    struct inode *dir,
3176			    struct buffer_head *parent_fe_bh,
3177			    unsigned int blocks_wanted,
3178			    struct ocfs2_dir_lookup_result *lookup,
3179			    struct buffer_head **new_de_bh)
3180{
3181	int status = 0;
3182	int credits, num_free_extents, drop_alloc_sem = 0;
3183	loff_t dir_i_size;
3184	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3185	struct ocfs2_extent_list *el = &fe->id2.i_list;
3186	struct ocfs2_alloc_context *data_ac = NULL;
3187	struct ocfs2_alloc_context *meta_ac = NULL;
3188	handle_t *handle = NULL;
3189	struct buffer_head *new_bh = NULL;
3190	struct ocfs2_dir_entry * de;
3191	struct super_block *sb = osb->sb;
3192	struct ocfs2_extent_tree et;
3193	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3194
3195	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3196		/*
3197		 * This would be a code error as an inline directory should
3198		 * never have an index root.
3199		 */
3200		BUG_ON(dx_root_bh);
3201
3202		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3203						 blocks_wanted, lookup,
3204						 &new_bh);
3205		if (status) {
3206			mlog_errno(status);
3207			goto bail;
3208		}
3209
3210		/* Expansion from inline to an indexed directory will
3211		 * have given us this. */
3212		dx_root_bh = lookup->dl_dx_root_bh;
3213
3214		if (blocks_wanted == 1) {
3215			/*
3216			 * If the new dirent will fit inside the space
3217			 * created by pushing out to one block, then
3218			 * we can complete the operation
3219			 * here. Otherwise we have to expand i_size
3220			 * and format the 2nd block below.
3221			 */
3222			BUG_ON(new_bh == NULL);
3223			goto bail_bh;
3224		}
3225
3226		/*
3227		 * Get rid of 'new_bh' - we want to format the 2nd
3228		 * data block and return that instead.
3229		 */
3230		brelse(new_bh);
3231		new_bh = NULL;
3232
3233		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3234		drop_alloc_sem = 1;
3235		dir_i_size = i_size_read(dir);
3236		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3237		goto do_extend;
3238	}
3239
3240	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3241	drop_alloc_sem = 1;
3242	dir_i_size = i_size_read(dir);
3243	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3244			       dir_i_size);
3245
3246	/* dir->i_size is always block aligned. */
3247	spin_lock(&OCFS2_I(dir)->ip_lock);
3248	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3249		spin_unlock(&OCFS2_I(dir)->ip_lock);
3250		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3251					      parent_fe_bh);
3252		num_free_extents = ocfs2_num_free_extents(osb, &et);
3253		if (num_free_extents < 0) {
3254			status = num_free_extents;
3255			mlog_errno(status);
3256			goto bail;
3257		}
3258
3259		if (!num_free_extents) {
3260			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3261			if (status < 0) {
3262				if (status != -ENOSPC)
3263					mlog_errno(status);
3264				goto bail;
3265			}
3266		}
3267
3268		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3269		if (status < 0) {
3270			if (status != -ENOSPC)
3271				mlog_errno(status);
3272			goto bail;
3273		}
3274
3275		if (ocfs2_dir_resv_allowed(osb))
3276			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3277
3278		credits = ocfs2_calc_extend_credits(sb, el);
3279	} else {
3280		spin_unlock(&OCFS2_I(dir)->ip_lock);
3281		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3282	}
3283
3284do_extend:
3285	if (ocfs2_dir_indexed(dir))
3286		credits++; /* For attaching the new dirent block to the
3287			    * dx_root */
3288
3289	handle = ocfs2_start_trans(osb, credits);
3290	if (IS_ERR(handle)) {
3291		status = PTR_ERR(handle);
3292		handle = NULL;
3293		mlog_errno(status);
3294		goto bail;
3295	}
3296
3297	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3298				     data_ac, meta_ac, &new_bh);
3299	if (status < 0) {
3300		mlog_errno(status);
3301		goto bail;
3302	}
3303
3304	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3305
3306	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3307					 OCFS2_JOURNAL_ACCESS_CREATE);
3308	if (status < 0) {
3309		mlog_errno(status);
3310		goto bail;
3311	}
3312	memset(new_bh->b_data, 0, sb->s_blocksize);
3313
3314	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3315	de->inode = 0;
3316	if (ocfs2_supports_dir_trailer(dir)) {
3317		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3318
3319		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3320
3321		if (ocfs2_dir_indexed(dir)) {
3322			status = ocfs2_dx_dir_link_trailer(dir, handle,
3323							   dx_root_bh, new_bh);
3324			if (status) {
3325				mlog_errno(status);
3326				goto bail;
3327			}
3328		}
3329	} else {
3330		de->rec_len = cpu_to_le16(sb->s_blocksize);
3331	}
3332	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3333	ocfs2_journal_dirty(handle, new_bh);
3334
3335	dir_i_size += dir->i_sb->s_blocksize;
3336	i_size_write(dir, dir_i_size);
3337	dir->i_blocks = ocfs2_inode_sector_count(dir);
3338	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3339	if (status < 0) {
3340		mlog_errno(status);
3341		goto bail;
3342	}
3343
3344bail_bh:
3345	*new_de_bh = new_bh;
3346	get_bh(*new_de_bh);
3347bail:
3348	if (handle)
3349		ocfs2_commit_trans(osb, handle);
3350	if (drop_alloc_sem)
3351		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3352
3353	if (data_ac)
3354		ocfs2_free_alloc_context(data_ac);
3355	if (meta_ac)
3356		ocfs2_free_alloc_context(meta_ac);
3357
3358	brelse(new_bh);
3359
3360	return status;
3361}
3362
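/*
 * Look for room to insert a dirent of the given name length in an
 * inline directory. Returns 0 with *ret_de_bh held on success, -EEXIST
 * if the name is already present, or -ENOSPC with *blocks_wanted set
 * when the directory has to be pushed out to an extent tree first.
 */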
3363static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3364				   const char *name, int namelen,
3365				   struct buffer_head **ret_de_bh,
3366				   unsigned int *blocks_wanted)
3367{
3368	int ret;
3369	struct super_block *sb = dir->i_sb;
3370	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3371	struct ocfs2_dir_entry *de, *last_de = NULL;
3372	char *de_buf, *limit;
3373	unsigned long offset = 0;
3374	unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize;
3375
3376	/*
3377	 * This calculates how many free bytes we'd have in block zero, should
3378	 * this function force expansion to an extent tree.
3379	 */
3380	if (ocfs2_new_dir_wants_trailer(dir))
3381		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3382	else
3383		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3384
3385	de_buf = di->id2.i_data.id_data;
3386	limit = de_buf + i_size_read(dir);
3387	rec_len = OCFS2_DIR_REC_LEN(namelen);
3388
3389	while (de_buf < limit) {
3390		de = (struct ocfs2_dir_entry *)de_buf;
3391
3392		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
3393			ret = -ENOENT;
3394			goto out;
3395		}
3396		if (ocfs2_match(namelen, name, de)) {
3397			ret = -EEXIST;
3398			goto out;
3399		}
3400		/*
3401		 * No need to check for a trailing dirent record here as
3402		 * they're not used for inline dirs.
3403		 */
3404
3405		if (ocfs2_dirent_would_fit(de, rec_len)) {
3406			/* Ok, we found a spot. Return this bh and let
3407			 * the caller actually fill it in. */
3408			*ret_de_bh = di_bh;
3409			get_bh(*ret_de_bh);
3410			ret = 0;
3411			goto out;
3412		}
3413
3414		last_de = de;
3415		de_buf += le16_to_cpu(de->rec_len);
3416		offset += le16_to_cpu(de->rec_len);
3417	}
3418
3419	/*
3420	 * We're going to require expansion of the directory - figure
3421	 * out how many blocks we'll need so that a place for the
3422	 * dirent can be found.
3423	 */
3424	*blocks_wanted = 1;
3425	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
3426	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3427		*blocks_wanted = 2;
3428
3429	ret = -ENOSPC;
3430out:
3431	return ret;
3432}
3433
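/*
 * Same search, but for an extent-based directory: walk each directory
 * block in turn looking for a dirent with enough room. -ENOSPC means
 * the caller needs to extend the directory.
 */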
3434static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3435				   int namelen, struct buffer_head **ret_de_bh)
3436{
3437	unsigned long offset;
3438	struct buffer_head *bh = NULL;
3439	unsigned short rec_len;
3440	struct ocfs2_dir_entry *de;
3441	struct super_block *sb = dir->i_sb;
3442	int status;
3443	int blocksize = dir->i_sb->s_blocksize;
3444
3445	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3446	if (status)
3447		goto bail;
3448
3449	rec_len = OCFS2_DIR_REC_LEN(namelen);
3450	offset = 0;
3451	de = (struct ocfs2_dir_entry *) bh->b_data;
3452	while (1) {
3453		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3454			brelse(bh);
3455			bh = NULL;
3456
3457			if (i_size_read(dir) <= offset) {
3458				/*
3459				 * Caller will have to expand this
3460				 * directory.
3461				 */
3462				status = -ENOSPC;
3463				goto bail;
3464			}
3465			status = ocfs2_read_dir_block(dir,
3466					     offset >> sb->s_blocksize_bits,
3467					     &bh, 0);
3468			if (status)
3469				goto bail;
3470
3471			/* move to next block */
3472			de = (struct ocfs2_dir_entry *) bh->b_data;
3473		}
3474		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
3475			status = -ENOENT;
3476			goto bail;
3477		}
3478		if (ocfs2_match(namelen, name, de)) {
3479			status = -EEXIST;
3480			goto bail;
3481		}
3482
3483		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3484					   blocksize))
3485			goto next;
3486
3487		if (ocfs2_dirent_would_fit(de, rec_len)) {
3488			/* Ok, we found a spot. Return this bh and let
3489			 * the caller actually fill it in. */
3490			*ret_de_bh = bh;
3491			get_bh(*ret_de_bh);
3492			status = 0;
3493			goto bail;
3494		}
3495next:
3496		offset += le16_to_cpu(de->rec_len);
3497		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3498	}
3499
3500bail:
3501	brelse(bh);
3502	if (status)
3503		mlog_errno(status);
3504
3505	return status;
3506}
3507
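/*
 * sort() helpers for dx leaf rebalancing: dx_leaf_sort_cmp() orders
 * entries by major hash, breaking ties with the minor hash, and
 * dx_leaf_sort_swap() exchanges two fixed-size entries.
 */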
3508static int dx_leaf_sort_cmp(const void *a, const void *b)
3509{
3510	const struct ocfs2_dx_entry *entry1 = a;
3511	const struct ocfs2_dx_entry *entry2 = b;
3512	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3513	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3514	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3515	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3516
3517	if (major_hash1 > major_hash2)
3518		return 1;
3519	if (major_hash1 < major_hash2)
3520		return -1;
3521
3522	/*
3523	 * It is not strictly necessary to sort by minor hash.
3524	 */
3525	if (minor_hash1 > minor_hash2)
3526		return 1;
3527	if (minor_hash1 < minor_hash2)
3528		return -1;
3529	return 0;
3530}
3531
3532static void dx_leaf_sort_swap(void *a, void *b, int size)
3533{
3534	struct ocfs2_dx_entry *entry1 = a;
3535	struct ocfs2_dx_entry *entry2 = b;
3536
3537	BUG_ON(size != sizeof(*entry1));
3538
3539	swap(*entry1, *entry2);
3540}
3541
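/*
 * Returns 1 if every entry in the leaf shares the same major hash,
 * 0 otherwise.
 */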
3542static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3543{
3544	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3545	int i, num = le16_to_cpu(dl_list->de_num_used);
3546
3547	for (i = 0; i < (num - 1); i++) {
3548		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3549		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3550			return 0;
3551	}
3552
3553	return 1;
3554}
3555
3556/*
3557 * Find the optimal value to split this leaf on. This expects the leaf
3558 * entries to be in sorted order.
3559 *
3560 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3561 * the hash we want to insert.
3562 *
3563 * This function is only concerned with the major hash - that which
3564 * determines which cluster an item belongs to.
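 *
 * For example, if the sorted major hashes in a leaf are {5, 5, 7, 9}
 * with leaf_cpos == 5, the entries are not all the same, so we scan
 * forward from the median and return the first hash larger than
 * leaf_cpos: *split_hash becomes 7.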
3565 */
3566static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3567					u32 leaf_cpos, u32 insert_hash,
3568					u32 *split_hash)
3569{
3570	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3571	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3572	int allsame;
3573
3574	/*
3575	 * There are a couple of rare, but nasty corner cases we have to
3576	 * check for here. All of them involve a leaf where all values
3577	 * have the same hash, which is what we look for first.
3578	 *
3579	 * Most of the time, all of the above is false, and we simply
3580	 * pick the median value for a split.
3581	 */
3582	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3583	if (allsame) {
3584		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3585
3586		if (val == insert_hash) {
3587			/*
3588			 * No matter where we would choose to split,
3589			 * the new entry would want to occupy the same
3590			 * block as these. Since there's no space left
3591			 * in their existing block, we know there
3592			 * won't be space after the split.
3593			 */
3594			return -ENOSPC;
3595		}
3596
3597		if (val == leaf_cpos) {
3598			/*
3599			 * Because val is the same as leaf_cpos (which
3600			 * is the smallest value this leaf can have),
3601			 * yet is not equal to insert_hash, then we
3602			 * know that insert_hash *must* be larger than
3603			 * val (and leaf_cpos). At least cpos+1 in value.
3604			 *
3605			 * We also know then, that there cannot be an
3606			 * adjacent extent (otherwise we'd be looking
3607			 * at it). Choosing this value gives us a
3608			 * chance to get some contiguousness.
3609			 */
3610			*split_hash = leaf_cpos + 1;
3611			return 0;
3612		}
3613
3614		if (val > insert_hash) {
3615			/*
3616			 * val cannot be the same as insert_hash, and
3617			 * must also be larger than leaf_cpos. Also,
3618			 * we know that there can't be a leaf between
3619			 * cpos and val, otherwise the entries with
3620			 * hash 'val' would be there.
3621			 */
3622			*split_hash = val;
3623			return 0;
3624		}
3625
3626		*split_hash = insert_hash;
3627		return 0;
3628	}
3629
3630	/*
3631	 * Since the records are sorted and the checks above
3632	 * guaranteed that not all records in this block are the same,
3633	 * we simply travel forward, from the median, and pick the 1st
3634	 * record whose value is larger than leaf_cpos.
3635	 */
3636	for (i = (num_used / 2); i < num_used; i++)
3637		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3638		    leaf_cpos)
3639			break;
3640
3641	BUG_ON(i == num_used); /* Should be impossible */
3642	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3643	return 0;
3644}
3645
3646/*
3647 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3648 * larger than split_hash into new_dx_leaves. We use a temporary
3649 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3650 *
3651 * Since the block offset inside a leaf (cluster) is a constant mask
3652 * of minor_hash, we can optimize - an item at block offset X within
3653 * the original cluster will be at offset X within the new cluster.
3654 */
3655static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3656				       handle_t *handle,
3657				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3658				       struct buffer_head **orig_dx_leaves,
3659				       struct buffer_head **new_dx_leaves,
3660				       int num_dx_leaves)
3661{
3662	int i, j, num_used;
3663	u32 major_hash;
3664	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3665	struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
3666	struct ocfs2_dx_entry *dx_entry;
3667
3668	tmp_list = &tmp_dx_leaf->dl_list;
3669
3670	for (i = 0; i < num_dx_leaves; i++) {
3671		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3672		orig_list = &orig_dx_leaf->dl_list;
3673		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
3674		new_list = &new_dx_leaf->dl_list;
3675
3676		num_used = le16_to_cpu(orig_list->de_num_used);
3677
3678		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3679		tmp_list->de_num_used = cpu_to_le16(0);
3680		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3681
3682		for (j = 0; j < num_used; j++) {
3683			dx_entry = &orig_list->de_entries[j];
3684			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3685			if (major_hash >= split_hash)
3686				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3687							      dx_entry);
3688			else
3689				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3690							      dx_entry);
3691		}
3692		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3693
3694		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3695		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3696	}
3697}
3698
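/*
 * Rough journal credit estimate for a dx leaf rebalance: three
 * clusters' worth of block credits, plus the cost of extending the
 * dx root's extent list and the quota update.
 */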
3699static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3700					  struct ocfs2_dx_root_block *dx_root)
3701{
3702	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3703
3704	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3705	credits += ocfs2_quota_trans_credits(osb->sb);
3706	return credits;
3707}
3708
3709/*
3710 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3711 * half our entries into.
3712 */
3713static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3714				  struct buffer_head *dx_root_bh,
3715				  struct buffer_head *dx_leaf_bh,
3716				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3717				  u64 leaf_blkno)
3718{
3719	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3720	int credits, ret, i, num_used, did_quota = 0;
3721	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3722	u64 orig_leaves_start;
3723	int num_dx_leaves;
3724	struct buffer_head **orig_dx_leaves = NULL;
3725	struct buffer_head **new_dx_leaves = NULL;
3726	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3727	struct ocfs2_extent_tree et;
3728	handle_t *handle = NULL;
3729	struct ocfs2_dx_root_block *dx_root;
3730	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3731
3732	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3733				     (unsigned long long)leaf_blkno,
3734				     insert_hash);
3735
3736	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3737
3738	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3739	/*
3740	 * XXX: This is a rather large limit. We should use a more
3741	 * realistic value.
3742	 */
3743	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3744		return -ENOSPC;
3745
3746	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3747	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3748		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: "
3749		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3750		     (unsigned long long)leaf_blkno, num_used);
3751		ret = -EIO;
3752		goto out;
3753	}
3754
3755	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3756	if (!orig_dx_leaves) {
3757		ret = -ENOMEM;
3758		mlog_errno(ret);
3759		goto out;
3760	}
3761
3762	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3763	if (!new_dx_leaves) {
3764		ret = -ENOMEM;
3765		mlog_errno(ret);
3766		goto out;
3767	}
3768
3769	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3770	if (ret) {
3771		if (ret != -ENOSPC)
3772			mlog_errno(ret);
3773		goto out;
3774	}
3775
3776	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3777	handle = ocfs2_start_trans(osb, credits);
3778	if (IS_ERR(handle)) {
3779		ret = PTR_ERR(handle);
3780		handle = NULL;
3781		mlog_errno(ret);
3782		goto out;
3783	}
3784
3785	ret = dquot_alloc_space_nodirty(dir,
3786				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3787	if (ret)
3788		goto out_commit;
3789	did_quota = 1;
3790
3791	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3792				      OCFS2_JOURNAL_ACCESS_WRITE);
3793	if (ret) {
3794		mlog_errno(ret);
3795		goto out_commit;
3796	}
3797
3798	/*
3799	 * This block is changing anyway, so we can sort it in place.
3800	 */
3801	sort(dx_leaf->dl_list.de_entries, num_used,
3802	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3803	     dx_leaf_sort_swap);
3804
3805	ocfs2_journal_dirty(handle, dx_leaf_bh);
3806
3807	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3808					   &split_hash);
3809	if (ret) {
3810		mlog_errno(ret);
3811		goto  out_commit;
3812	}
3813
3814	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3815
3816	/*
3817	 * We have to carefully order operations here. There are items
3818	 * which want to be in the new cluster before insert, but in
3819	 * order to put those items in the new cluster, we alter the
3820	 * old cluster. A failure to insert gets nasty.
3821	 *
3822	 * So, start by reserving writes to the old
3823	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3824	 * the new cluster for us, before inserting it. The insert
3825	 * won't happen if there's an error before that. Once the
3826	 * insert is done, we can transfer from one leaf into the
3827	 * other without fear of hitting any error.
3828	 */
3829
3830	/*
3831	 * The leaf transfer wants some scratch space so that we don't
3832	 * wind up doing a bunch of expensive memmove().
3833	 */
3834	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3835	if (!tmp_dx_leaf) {
3836		ret = -ENOMEM;
3837		mlog_errno(ret);
3838		goto out_commit;
3839	}
3840
3841	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3842	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3843				   orig_dx_leaves);
3844	if (ret) {
3845		mlog_errno(ret);
3846		goto out_commit;
3847	}
3848
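	/*
	 * The new cluster is inserted into the dx tree at cpos ==
	 * split_hash, so names hashing at or above split_hash will land
	 * in it once the transfer below completes.
	 */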
3849	cpos = split_hash;
3850	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3851				       data_ac, meta_ac, new_dx_leaves,
3852				       num_dx_leaves);
3853	if (ret) {
3854		mlog_errno(ret);
3855		goto out_commit;
3856	}
3857
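	/*
	 * Reserve journal access to every leaf in both clusters before
	 * touching any of them, so the transfer itself cannot fail
	 * partway through.
	 */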
3858	for (i = 0; i < num_dx_leaves; i++) {
3859		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3860					      orig_dx_leaves[i],
3861					      OCFS2_JOURNAL_ACCESS_WRITE);
3862		if (ret) {
3863			mlog_errno(ret);
3864			goto out_commit;
3865		}
3866
3867		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3868					      new_dx_leaves[i],
3869					      OCFS2_JOURNAL_ACCESS_WRITE);
3870		if (ret) {
3871			mlog_errno(ret);
3872			goto out_commit;
3873		}
3874	}
3875
3876	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3877				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3878
3879out_commit:
3880	if (ret < 0 && did_quota)
3881		dquot_free_space_nodirty(dir,
3882				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3883
3884	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3885	ocfs2_commit_trans(osb, handle);
3886
3887out:
3888	if (orig_dx_leaves || new_dx_leaves) {
3889		for (i = 0; i < num_dx_leaves; i++) {
3890			if (orig_dx_leaves)
3891				brelse(orig_dx_leaves[i]);
3892			if (new_dx_leaves)
3893				brelse(new_dx_leaves[i]);
3894		}
3895		kfree(orig_dx_leaves);
3896		kfree(new_dx_leaves);
3897	}
3898
3899	if (meta_ac)
3900		ocfs2_free_alloc_context(meta_ac);
3901	if (data_ac)
3902		ocfs2_free_alloc_context(data_ac);
3903
3904	kfree(tmp_dx_leaf);
3905	return ret;
3906}
3907
3908static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3909				   struct buffer_head *di_bh,
3910				   struct buffer_head *dx_root_bh,
3911				   const char *name, int namelen,
3912				   struct ocfs2_dir_lookup_result *lookup)
3913{
3914	int ret, rebalanced = 0;
3915	struct ocfs2_dx_root_block *dx_root;
3916	struct buffer_head *dx_leaf_bh = NULL;
3917	struct ocfs2_dx_leaf *dx_leaf;
3918	u64 blkno;
3919	u32 leaf_cpos;
3920
3921	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3922
3923restart_search:
3924	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3925				  &leaf_cpos, &blkno);
3926	if (ret) {
3927		mlog_errno(ret);
3928		goto out;
3929	}
3930
3931	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3932	if (ret) {
3933		mlog_errno(ret);
3934		goto out;
3935	}
3936
3937	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3938
3939	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3940	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3941		if (rebalanced) {
3942			/*
3943			 * Rebalancing should have provided us with
3944			 * space in an appropriate leaf.
3945			 *
3946			 * XXX: Is this an abnormal condition then?
3947			 * Should we print a message here?
3948			 */
3949			ret = -ENOSPC;
3950			goto out;
3951		}
3952
3953		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3954					     &lookup->dl_hinfo, leaf_cpos,
3955					     blkno);
3956		if (ret) {
3957			if (ret != -ENOSPC)
3958				mlog_errno(ret);
3959			goto out;
3960		}
3961
3962		/*
3963		 * Restart the lookup. The rebalance might have
3964		 * changed which block our item fits into. Mark our
3965		 * progress, so we only execute this once.
3966		 */
3967		brelse(dx_leaf_bh);
3968		dx_leaf_bh = NULL;
3969		rebalanced = 1;
3970		goto restart_search;
3971	}
3972
3973	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3974	dx_leaf_bh = NULL;
3975
3976out:
3977	brelse(dx_leaf_bh);
3978	return ret;
3979}
3980
3981static int ocfs2_search_dx_free_list(struct inode *dir,
3982				     struct buffer_head *dx_root_bh,
3983				     int namelen,
3984				     struct ocfs2_dir_lookup_result *lookup)
3985{
3986	int ret = -ENOSPC;
3987	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3988	struct ocfs2_dir_block_trailer *db;
3989	u64 next_block;
3990	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3991	struct ocfs2_dx_root_block *dx_root;
3992
3993	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3994	next_block = le64_to_cpu(dx_root->dr_free_blk);
3995
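	/*
	 * Walk the singly linked free list rooted at dr_free_blk for the
	 * first block whose largest free slot fits this record,
	 * remembering the previous block on the list as well.
	 */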
3996	while (next_block) {
3997		brelse(prev_leaf_bh);
3998		prev_leaf_bh = leaf_bh;
3999		leaf_bh = NULL;
4000
4001		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
4002		if (ret) {
4003			mlog_errno(ret);
4004			goto out;
4005		}
4006
4007		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
4008		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
4009			lookup->dl_leaf_bh = leaf_bh;
4010			lookup->dl_prev_leaf_bh = prev_leaf_bh;
4011			leaf_bh = NULL;
4012			prev_leaf_bh = NULL;
4013			break;
4014		}
4015
4016		next_block = le64_to_cpu(db->db_free_next);
4017	}
4018
4019	if (!next_block)
4020		ret = -ENOSPC;
4021
4022out:
4023
4024	brelse(leaf_bh);
4025	brelse(prev_leaf_bh);
4026	return ret;
4027}
4028
4029static int ocfs2_expand_inline_dx_root(struct inode *dir,
4030				       struct buffer_head *dx_root_bh)
4031{
4032	int ret, num_dx_leaves, i, j, did_quota = 0;
4033	struct buffer_head **dx_leaves = NULL;
4034	struct ocfs2_extent_tree et;
4035	u64 insert_blkno;
4036	struct ocfs2_alloc_context *data_ac = NULL;
4037	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4038	handle_t *handle = NULL;
4039	struct ocfs2_dx_root_block *dx_root;
4040	struct ocfs2_dx_entry_list *entry_list;
4041	struct ocfs2_dx_entry *dx_entry;
4042	struct ocfs2_dx_leaf *target_leaf;
4043
4044	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4045	if (ret) {
4046		mlog_errno(ret);
4047		goto out;
4048	}
4049
4050	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4051	if (!dx_leaves) {
4052		ret = -ENOMEM;
4053		mlog_errno(ret);
4054		goto out;
4055	}
4056
4057	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4058	if (IS_ERR(handle)) {
4059		ret = PTR_ERR(handle);
4060		mlog_errno(ret);
4061		goto out;
4062	}
4063
4064	ret = dquot_alloc_space_nodirty(dir,
4065				       ocfs2_clusters_to_bytes(osb->sb, 1));
4066	if (ret)
4067		goto out_commit;
4068	did_quota = 1;
4069
4070	/*
4071	 * We do this up front, before the allocation, so that a
4072	 * failure to add the dx_root_bh to the journal won't result
4073	 * in us losing clusters.
4074	 */
4075	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4076				      OCFS2_JOURNAL_ACCESS_WRITE);
4077	if (ret) {
4078		mlog_errno(ret);
4079		goto out_commit;
4080	}
4081
4082	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4083					 num_dx_leaves, &insert_blkno);
4084	if (ret) {
4085		mlog_errno(ret);
4086		goto out_commit;
4087	}
4088
4089	/*
4090	 * Transfer the entries from our dx_root into the appropriate
4091	 * block
4092	 */
4093	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4094	entry_list = &dx_root->dr_entries;
4095
4096	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4097		dx_entry = &entry_list->de_entries[i];
4098
4099		j = __ocfs2_dx_dir_hash_idx(osb,
4100					    le32_to_cpu(dx_entry->dx_minor_hash));
4101		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4102
4103		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4104
4105		/* Each leaf has been passed to the journal already
4106		 * via __ocfs2_dx_dir_new_cluster() */
4107	}
4108
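	/*
	 * All entries now live in the new cluster's leaves. Convert the
	 * root from inline-entry format to an (initially empty) extent
	 * list and link the new cluster in below.
	 */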
4109	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4110	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4111	       offsetof(struct ocfs2_dx_root_block, dr_list));
4112	dx_root->dr_list.l_count =
4113		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4114
4115	/* This should never fail considering we start with an empty
4116	 * dx_root. */
4117	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4118	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4119	if (ret)
4120		mlog_errno(ret);
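	/*
	 * Past this point the error path must not release the quota
	 * charge for the new cluster.
	 */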
4121	did_quota = 0;
4122
4123	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4124	ocfs2_journal_dirty(handle, dx_root_bh);
4125
4126out_commit:
4127	if (ret < 0 && did_quota)
4128		dquot_free_space_nodirty(dir,
4129					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4130
4131	ocfs2_commit_trans(osb, handle);
4132
4133out:
4134	if (data_ac)
4135		ocfs2_free_alloc_context(data_ac);
4136
4137	if (dx_leaves) {
4138		for (i = 0; i < num_dx_leaves; i++)
4139			brelse(dx_leaves[i]);
4140		kfree(dx_leaves);
4141	}
4142	return ret;
4143}
4144
4145static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4146{
4147	struct ocfs2_dx_root_block *dx_root;
4148	struct ocfs2_dx_entry_list *entry_list;
4149
4150	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4151	entry_list = &dx_root->dr_entries;
4152
4153	if (le16_to_cpu(entry_list->de_num_used) >=
4154	    le16_to_cpu(entry_list->de_count))
4155		return -ENOSPC;
4156
4157	return 0;
4158}
4159
4160static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4161					   struct buffer_head *di_bh,
4162					   const char *name,
4163					   int namelen,
4164					   struct ocfs2_dir_lookup_result *lookup)
4165{
4166	int ret, free_dx_root = 1;
4167	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4168	struct buffer_head *dx_root_bh = NULL;
4169	struct buffer_head *leaf_bh = NULL;
4170	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4171	struct ocfs2_dx_root_block *dx_root;
4172
4173	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4174	if (ret) {
4175		mlog_errno(ret);
4176		goto out;
4177	}
4178
4179	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4180	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4181		ret = -ENOSPC;
4182		mlog_errno(ret);
4183		goto out;
4184	}
4185
4186	if (ocfs2_dx_root_inline(dx_root)) {
4187		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4188
4189		if (ret == 0)
4190			goto search_el;
4191
4192		/*
4193		 * We ran out of room in the root block. Expand it to
4194		 * an extent, then allow ocfs2_find_dir_space_dx to do
4195		 * the rest.
4196		 */
4197		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4198		if (ret) {
4199			mlog_errno(ret);
4200			goto out;
4201		}
4202	}
4203
4204	/*
4205	 * Insert preparation for an indexed directory is split into two
4206	 * steps. The call to find_dir_space_dx reserves room in the index for
4207	 * an additional item. If we run out of space there, it's a real error
4208	 * and we can't continue.
4209	 */
4210	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4211				      namelen, lookup);
4212	if (ret) {
4213		mlog_errno(ret);
4214		goto out;
4215	}
4216
4217search_el:
4218	/*
4219	 * Next, we need to find space in the unindexed tree. This call
4220	 * searches using the free space linked list. If the unindexed tree
4221	 * lacks sufficient space, we'll expand it below. The expansion code
4222	 * is smart enough to add any new blocks to the free space list.
4223	 */
4224	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4225	if (ret && ret != -ENOSPC) {
4226		mlog_errno(ret);
4227		goto out;
4228	}
4229
4230	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4231	lookup->dl_dx_root_bh = dx_root_bh;
4232	free_dx_root = 0;
4233
4234	if (ret == -ENOSPC) {
4235		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4236
4237		if (ret) {
4238			mlog_errno(ret);
4239			goto out;
4240		}
4241
4242		/*
4243		 * We make the assumption here that new leaf blocks are added
4244		 * to the front of our free list.
4245		 */
4246		lookup->dl_prev_leaf_bh = NULL;
4247		lookup->dl_leaf_bh = leaf_bh;
4248	}
4249
4250out:
4251	if (free_dx_root)
4252		brelse(dx_root_bh);
4253	return ret;
4254}
4255
4256/*
4257 * Get a directory ready for insert. Any directory allocation required
4258 * happens here. Success returns zero, and enough context in the dir
4259 * lookup result that ocfs2_add_entry() will be able to complete the task
4260 * with minimal performance impact.
4261 */
4262int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4263				 struct inode *dir,
4264				 struct buffer_head *parent_fe_bh,
4265				 const char *name,
4266				 int namelen,
4267				 struct ocfs2_dir_lookup_result *lookup)
4268{
4269	int ret;
4270	unsigned int blocks_wanted = 1;
4271	struct buffer_head *bh = NULL;
4272
4273	trace_ocfs2_prepare_dir_for_insert(
4274		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4275
4276	if (!namelen) {
4277		ret = -EINVAL;
4278		mlog_errno(ret);
4279		goto out;
4280	}
4281
4282	/*
4283	 * Do this up front to reduce confusion.
4284	 *
4285	 * The directory might start inline, then be turned into an
4286	 * indexed one, in which case we'd need to hash deep inside
4287	 * ocfs2_find_dir_space_id(). Since
4288	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4289	 * done, there seems no point in spreading out the calls. We
4290	 * can optimize away the case where the file system doesn't
4291	 * support indexing.
4292	 */
4293	if (ocfs2_supports_indexed_dirs(osb))
4294		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4295
4296	if (ocfs2_dir_indexed(dir)) {
4297		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4298						      name, namelen, lookup);
4299		if (ret)
4300			mlog_errno(ret);
4301		goto out;
4302	}
4303
4304	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4305		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4306					      namelen, &bh, &blocks_wanted);
4307	} else
4308		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4309
4310	if (ret && ret != -ENOSPC) {
4311		mlog_errno(ret);
4312		goto out;
4313	}
4314
4315	if (ret == -ENOSPC) {
4316		/*
4317		 * We have to expand the directory to add this name.
4318		 */
4319		BUG_ON(bh);
4320
4321		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4322				       lookup, &bh);
4323		if (ret) {
4324			if (ret != -ENOSPC)
4325				mlog_errno(ret);
4326			goto out;
4327		}
4328
4329		BUG_ON(!bh);
4330	}
4331
4332	lookup->dl_leaf_bh = bh;
4333	bh = NULL;
4334out:
4335	brelse(bh);
4336	return ret;
4337}
4338
4339static int ocfs2_dx_dir_remove_index(struct inode *dir,
4340				     struct buffer_head *di_bh,
4341				     struct buffer_head *dx_root_bh)
4342{
4343	int ret;
4344	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4345	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4346	struct ocfs2_dx_root_block *dx_root;
4347	struct inode *dx_alloc_inode = NULL;
4348	struct buffer_head *dx_alloc_bh = NULL;
4349	handle_t *handle;
4350	u64 blk;
4351	u16 bit;
4352	u64 bg_blkno;
4353
4354	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4355
4356	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4357					EXTENT_ALLOC_SYSTEM_INODE,
4358					le16_to_cpu(dx_root->dr_suballoc_slot));
4359	if (!dx_alloc_inode) {
4360		ret = -ENOMEM;
4361		mlog_errno(ret);
4362		goto out;
4363	}
4364	inode_lock(dx_alloc_inode);
4365
4366	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4367	if (ret) {
4368		mlog_errno(ret);
4369		goto out_mutex;
4370	}
4371
4372	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4373	if (IS_ERR(handle)) {
4374		ret = PTR_ERR(handle);
4375		mlog_errno(ret);
4376		goto out_unlock;
4377	}
4378
4379	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4380				      OCFS2_JOURNAL_ACCESS_WRITE);
4381	if (ret) {
4382		mlog_errno(ret);
4383		goto out_commit;
4384	}
4385
4386	spin_lock(&OCFS2_I(dir)->ip_lock);
4387	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4388	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4389	spin_unlock(&OCFS2_I(dir)->ip_lock);
4390	di->i_dx_root = cpu_to_le64(0ULL);
4391	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4392
4393	ocfs2_journal_dirty(handle, di_bh);
4394
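	/*
	 * Return the dx root block to its suballocator. dr_suballoc_loc,
	 * when set, records the block group directly; otherwise derive
	 * it from the block number and bit.
	 */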
4395	blk = le64_to_cpu(dx_root->dr_blkno);
4396	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4397	if (dx_root->dr_suballoc_loc)
4398		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4399	else
4400		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4401	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4402				       bit, bg_blkno, 1);
4403	if (ret)
4404		mlog_errno(ret);
4405
4406out_commit:
4407	ocfs2_commit_trans(osb, handle);
4408
4409out_unlock:
4410	ocfs2_inode_unlock(dx_alloc_inode, 1);
4411
4412out_mutex:
4413	inode_unlock(dx_alloc_inode);
4414	brelse(dx_alloc_bh);
4415out:
4416	iput(dx_alloc_inode);
4417	return ret;
4418}
4419
4420int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4421{
4422	int ret;
4423	unsigned int uninitialized_var(clen);
4424	u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos);
4425	u64 uninitialized_var(blkno);
4426	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4427	struct buffer_head *dx_root_bh = NULL;
4428	struct ocfs2_dx_root_block *dx_root;
4429	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4430	struct ocfs2_cached_dealloc_ctxt dealloc;
4431	struct ocfs2_extent_tree et;
4432
4433	ocfs2_init_dealloc_ctxt(&dealloc);
4434
4435	if (!ocfs2_dir_indexed(dir))
4436		return 0;
4437
4438	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4439	if (ret) {
4440		mlog_errno(ret);
4441		goto out;
4442	}
4443	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4444
4445	if (ocfs2_dx_root_inline(dx_root))
4446		goto remove_index;
4447
4448	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4449
4450	/* XXX: What if dr_clusters is too large? */
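	/*
	 * Walk the index extents from the highest hash value downward,
	 * freeing one extent record's worth of clusters per pass until
	 * dr_clusters reaches zero.
	 */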
4451	while (le32_to_cpu(dx_root->dr_clusters)) {
4452		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4453					      major_hash, &cpos, &blkno, &clen);
4454		if (ret) {
4455			mlog_errno(ret);
4456			goto out;
4457		}
4458
4459		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4460
4461		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4462					       &dealloc, 0, false);
4463		if (ret) {
4464			mlog_errno(ret);
4465			goto out;
4466		}
4467
4468		if (cpos == 0)
4469			break;
4470
4471		major_hash = cpos - 1;
4472	}
4473
4474remove_index:
4475	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4476	if (ret) {
4477		mlog_errno(ret);
4478		goto out;
4479	}
4480
4481	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4482out:
4483	ocfs2_schedule_truncate_log_flush(osb, 1);
4484	ocfs2_run_deallocs(osb, &dealloc);
4485
4486	brelse(dx_root_bh);
4487	return ret;
4488}
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* -*- mode: c; c-basic-offset: 8; -*-
   3 * vim: noexpandtab sw=8 ts=8 sts=0:
   4 *
   5 * dir.c
   6 *
   7 * Creates, reads, walks and deletes directory-nodes
   8 *
   9 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
  10 *
  11 *  Portions of this code from linux/fs/ext3/dir.c
  12 *
  13 *  Copyright (C) 1992, 1993, 1994, 1995
  14 *  Remy Card (card@masi.ibp.fr)
  15 *  Laboratoire MASI - Institut Blaise pascal
  16 *  Universite Pierre et Marie Curie (Paris VI)
  17 *
  18 *   from
  19 *
  20 *   linux/fs/minix/dir.c
  21 *
  22 *   Copyright (C) 1991, 1992 Linus Torvalds
  23 */
  24
  25#include <linux/fs.h>
  26#include <linux/types.h>
  27#include <linux/slab.h>
  28#include <linux/highmem.h>
  29#include <linux/quotaops.h>
  30#include <linux/sort.h>
  31#include <linux/iversion.h>
  32
  33#include <cluster/masklog.h>
  34
  35#include "ocfs2.h"
  36
  37#include "alloc.h"
  38#include "blockcheck.h"
  39#include "dir.h"
  40#include "dlmglue.h"
  41#include "extent_map.h"
  42#include "file.h"
  43#include "inode.h"
  44#include "journal.h"
  45#include "namei.h"
  46#include "suballoc.h"
  47#include "super.h"
  48#include "sysfile.h"
  49#include "uptodate.h"
  50#include "ocfs2_trace.h"
  51
  52#include "buffer_head_io.h"
  53
  54#define NAMEI_RA_CHUNKS  2
  55#define NAMEI_RA_BLOCKS  4
  56#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  57
  58static int ocfs2_do_extend_dir(struct super_block *sb,
  59			       handle_t *handle,
  60			       struct inode *dir,
  61			       struct buffer_head *parent_fe_bh,
  62			       struct ocfs2_alloc_context *data_ac,
  63			       struct ocfs2_alloc_context *meta_ac,
  64			       struct buffer_head **new_bh);
  65static int ocfs2_dir_indexed(struct inode *inode);
  66
  67/*
  68 * These are distinct checks because future versions of the file system will
  69 * want to have a trailing dirent structure independent of indexing.
  70 */
  71static int ocfs2_supports_dir_trailer(struct inode *dir)
  72{
  73	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  74
  75	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  76		return 0;
  77
  78	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  79}
  80
  81/*
  82 * "new" here refers to the point at which we're creating a new
  83 * directory via "mkdir()", but also when we're expanding an inline
  84 * directory. In either case, we don't yet have the indexing bit set
  85 * on the directory, so the standard checks will fail when metaecc
  86 * is turned off. Only directory-initialization type functions should
  87 * use this then. Everything else wants ocfs2_supports_dir_trailer()
  88 */
  89static int ocfs2_new_dir_wants_trailer(struct inode *dir)
  90{
  91	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  92
  93	return ocfs2_meta_ecc(osb) ||
  94		ocfs2_supports_indexed_dirs(osb);
  95}
  96
  97static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
  98{
  99	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
 100}
 101
 102#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 103
 104/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 105 * them more consistent? */
 106struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 107							    void *data)
 108{
 109	char *p = data;
 110
 111	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 112	return (struct ocfs2_dir_block_trailer *)p;
 113}
 114
 115/*
 116 * XXX: This is executed once on every dirent. We should consider optimizing
 117 * it.
 118 */
 119static int ocfs2_skip_dir_trailer(struct inode *dir,
 120				  struct ocfs2_dir_entry *de,
 121				  unsigned long offset,
 122				  unsigned long blklen)
 123{
 124	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 125
 126	if (!ocfs2_supports_dir_trailer(dir))
 127		return 0;
 128
 129	if (offset != toff)
 130		return 0;
 131
 132	return 1;
 133}
 134
 135static void ocfs2_init_dir_trailer(struct inode *inode,
 136				   struct buffer_head *bh, u16 rec_len)
 137{
 138	struct ocfs2_dir_block_trailer *trailer;
 139
 140	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 141	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 142	trailer->db_compat_rec_len =
 143			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 144	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 145	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 146	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 147}
 148/*
 149 * Link an unindexed block with a dir trailer structure into the index free
 150 * list. This function will modify dirdata_bh, but assumes you've already
 151 * passed it to the journal.
 152 */
 153static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 154				     struct buffer_head *dx_root_bh,
 155				     struct buffer_head *dirdata_bh)
 156{
 157	int ret;
 158	struct ocfs2_dx_root_block *dx_root;
 159	struct ocfs2_dir_block_trailer *trailer;
 160
 161	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 162				      OCFS2_JOURNAL_ACCESS_WRITE);
 163	if (ret) {
 164		mlog_errno(ret);
 165		goto out;
 166	}
 167	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 168	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 169
 170	trailer->db_free_next = dx_root->dr_free_blk;
 171	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 172
 173	ocfs2_journal_dirty(handle, dx_root_bh);
 174
 175out:
 176	return ret;
 177}
 178
 179static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 180{
 181	return res->dl_prev_leaf_bh == NULL;
 182}
 183
 184void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 185{
 186	brelse(res->dl_dx_root_bh);
 187	brelse(res->dl_leaf_bh);
 188	brelse(res->dl_dx_leaf_bh);
 189	brelse(res->dl_prev_leaf_bh);
 190}
 191
 192static int ocfs2_dir_indexed(struct inode *inode)
 193{
 194	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 195		return 1;
 196	return 0;
 197}
 198
 199static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 200{
 201	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 202}
 203
 204/*
 205 * Hashing code adapted from ext3
 206 */
 207#define DELTA 0x9E3779B9
 208
 209static void TEA_transform(__u32 buf[4], __u32 const in[])
 210{
 211	__u32	sum = 0;
 212	__u32	b0 = buf[0], b1 = buf[1];
 213	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 214	int	n = 16;
 215
 216	do {
 217		sum += DELTA;
 218		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 219		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 220	} while (--n);
 221
 222	buf[0] += b0;
 223	buf[1] += b1;
 224}
 225
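/*
 * Pack the name into __u32 words for TEA_transform(). Short input is
 * padded with the length byte repeated across each word, mirroring the
 * ext3 code this was adapted from.
 */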
 226static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 227{
 228	__u32	pad, val;
 229	int	i;
 230
 231	pad = (__u32)len | ((__u32)len << 8);
 232	pad |= pad << 16;
 233
 234	val = pad;
 235	if (len > num*4)
 236		len = num * 4;
 237	for (i = 0; i < len; i++) {
 238		if ((i % 4) == 0)
 239			val = pad;
 240		val = msg[i] + (val << 8);
 241		if ((i % 4) == 3) {
 242			*buf++ = val;
 243			val = pad;
 244			num--;
 245		}
 246	}
 247	if (--num >= 0)
 248		*buf++ = val;
 249	while (--num >= 0)
 250		*buf++ = pad;
 251}
 252
 253static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 254				   struct ocfs2_dx_hinfo *hinfo)
 255{
 256	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 257	const char	*p;
 258	__u32		in[8], buf[4];
 259
 260	/*
 261	 * XXX: Is this really necessary, if the index is never looked
 262	 * at by readdir? Is a hash value of '0' a bad idea?
 263	 */
 264	if ((len == 1 && !strncmp(".", name, 1)) ||
 265	    (len == 2 && !strncmp("..", name, 2))) {
 266		buf[0] = buf[1] = 0;
 267		goto out;
 268	}
 269
 270#ifdef OCFS2_DEBUG_DX_DIRS
 271	/*
 272	 * This makes it very easy to debug indexing problems. We
 273	 * should never allow this to be selected without hand editing
 274	 * this file though.
 275	 */
 276	buf[0] = buf[1] = len;
 277	goto out;
 278#endif
 279
 280	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 281
 282	p = name;
 283	while (len > 0) {
 284		str2hashbuf(p, len, in, 4);
 285		TEA_transform(buf, in);
 286		len -= 16;
 287		p += 16;
 288	}
 289
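	/*
	 * buf[0] becomes the major hash, which selects a cluster in the
	 * index tree; buf[1] is the minor hash, which selects the
	 * dx_leaf block within that cluster.
	 */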
 290out:
 291	hinfo->major_hash = buf[0];
 292	hinfo->minor_hash = buf[1];
 293}
 294
 295/*
 296 * bh passed here can be an inode block or a dir data block, depending
 297 * on the inode inline data flag.
 298 */
 299static int ocfs2_check_dir_entry(struct inode * dir,
 300				 struct ocfs2_dir_entry * de,
 301				 struct buffer_head * bh,
 302				 unsigned long offset)
 303{
 304	const char *error_msg = NULL;
 305	const int rlen = le16_to_cpu(de->rec_len);
 306
 307	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 308		error_msg = "rec_len is smaller than minimal";
 309	else if (unlikely(rlen % 4 != 0))
 310		error_msg = "rec_len % 4 != 0";
 311	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 312		error_msg = "rec_len is too small for name_len";
 313	else if (unlikely(
 314		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
 315		error_msg = "directory entry across blocks";
 316
 317	if (unlikely(error_msg != NULL))
 318		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 319		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 320		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 321		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 322		     de->name_len);
 323
 324	return error_msg == NULL ? 1 : 0;
 325}
 326
 327static inline int ocfs2_match(int len,
 328			      const char * const name,
 329			      struct ocfs2_dir_entry *de)
 330{
 331	if (len != de->name_len)
 332		return 0;
 333	if (!de->inode)
 334		return 0;
 335	return !memcmp(name, de->name, len);
 336}
 337
 338/*
 339 * Returns 0 if not found, -1 on failure, and 1 on success
 340 */
 341static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 342					struct inode *dir,
 343					const char *name, int namelen,
 344					unsigned long offset,
 345					char *first_de,
 346					unsigned int bytes,
 347					struct ocfs2_dir_entry **res_dir)
 348{
 349	struct ocfs2_dir_entry *de;
 350	char *dlimit, *de_buf;
 351	int de_len;
 352	int ret = 0;
 353
 354	de_buf = first_de;
 355	dlimit = de_buf + bytes;
 356
 357	while (de_buf < dlimit) {
 358		/* this code is executed quadratically often */
 359		/* do minimal checking `by hand' */
 360
 361		de = (struct ocfs2_dir_entry *) de_buf;
 362
 363		if (de_buf + namelen <= dlimit &&
 364		    ocfs2_match(namelen, name, de)) {
 365			/* found a match - just to be sure, do a full check */
 366			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 367				ret = -1;
 368				goto bail;
 369			}
 370			*res_dir = de;
 371			ret = 1;
 372			goto bail;
 373		}
 374
 375		/* prevent looping on a bad block */
 376		de_len = le16_to_cpu(de->rec_len);
 377		if (de_len <= 0) {
 378			ret = -1;
 379			goto bail;
 380		}
 381
 382		de_buf += de_len;
 383		offset += de_len;
 384	}
 385
 386bail:
 387	trace_ocfs2_search_dirblock(ret);
 388	return ret;
 389}
 390
 391static struct buffer_head *ocfs2_find_entry_id(const char *name,
 392					       int namelen,
 393					       struct inode *dir,
 394					       struct ocfs2_dir_entry **res_dir)
 395{
 396	int ret, found;
 397	struct buffer_head *di_bh = NULL;
 398	struct ocfs2_dinode *di;
 399	struct ocfs2_inline_data *data;
 400
 401	ret = ocfs2_read_inode_block(dir, &di_bh);
 402	if (ret) {
 403		mlog_errno(ret);
 404		goto out;
 405	}
 406
 407	di = (struct ocfs2_dinode *)di_bh->b_data;
 408	data = &di->id2.i_data;
 409
 410	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 411				      data->id_data, i_size_read(dir), res_dir);
 412	if (found == 1)
 413		return di_bh;
 414
 415	brelse(di_bh);
 416out:
 417	return NULL;
 418}
 419
 420static int ocfs2_validate_dir_block(struct super_block *sb,
 421				    struct buffer_head *bh)
 422{
 423	int rc;
 424	struct ocfs2_dir_block_trailer *trailer =
 425		ocfs2_trailer_from_bh(bh, sb);
 426
 427
 428	/*
 429	 * We don't validate dirents here; that's handled
 430	 * in-place when the code walks them.
 431	 */
 432	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 433
 434	BUG_ON(!buffer_uptodate(bh));
 435
 436	/*
 437	 * If the ecc fails, we return the error but otherwise
 438	 * leave the filesystem running.  We know any error is
 439	 * local to this block.
 440	 *
 441	 * Note that we are safe to call this even if the directory
 442	 * doesn't have a trailer.  Filesystems without metaecc will do
 443	 * nothing, and filesystems with it will have one.
 444	 */
 445	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 446	if (rc)
 447		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
 448		     (unsigned long long)bh->b_blocknr);
 449
 450	return rc;
 451}
 452
 453/*
 454 * Validate a directory trailer.
 455 *
 456 * We check the trailer here rather than in ocfs2_validate_dir_block()
 457 * because that function doesn't have the inode to test.
 458 */
 459static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 460{
 461	int rc = 0;
 462	struct ocfs2_dir_block_trailer *trailer;
 463
 464	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 465	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 466		rc = ocfs2_error(dir->i_sb,
 467				 "Invalid dirblock #%llu: signature = %.*s\n",
 468				 (unsigned long long)bh->b_blocknr, 7,
 469				 trailer->db_signature);
 470		goto out;
 471	}
 472	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 473		rc = ocfs2_error(dir->i_sb,
 474				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 475				 (unsigned long long)bh->b_blocknr,
 476				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 477		goto out;
 478	}
 479	if (le64_to_cpu(trailer->db_parent_dinode) !=
 480	    OCFS2_I(dir)->ip_blkno) {
 481		rc = ocfs2_error(dir->i_sb,
 482				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 483				 (unsigned long long)bh->b_blocknr,
 484				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
 485				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 486		goto out;
 487	}
 488out:
 489	return rc;
 490}
 491
 492/*
 493 * This function forces all errors to -EIO for consistency with its
 494 * predecessor, ocfs2_bread().  We haven't audited what returning the
 495 * real error codes would do to callers.  We log the real codes with
 496 * mlog_errno() before we squash them.
 497 */
 498static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 499				struct buffer_head **bh, int flags)
 500{
 501	int rc = 0;
 502	struct buffer_head *tmp = *bh;
 503
 504	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 505				    ocfs2_validate_dir_block);
 506	if (rc) {
 507		mlog_errno(rc);
 508		goto out;
 509	}
 510
 511	if (!(flags & OCFS2_BH_READAHEAD) &&
 512	    ocfs2_supports_dir_trailer(inode)) {
 513		rc = ocfs2_check_dir_trailer(inode, tmp);
 514		if (rc) {
 515			if (!*bh)
 516				brelse(tmp);
 517			mlog_errno(rc);
 518			goto out;
 519		}
 520	}
 521
 522	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 523	if (!*bh)
 524		*bh = tmp;
 525
 526out:
 527	return rc ? -EIO : 0;
 528}
 529
 530/*
 531 * Read the block at 'phys' which belongs to this directory
 532 * inode. This function does no virtual->physical block translation -
 533 * what's passed in is assumed to be a valid directory block.
 534 */
 535static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 536				       struct buffer_head **bh)
 537{
 538	int ret;
 539	struct buffer_head *tmp = *bh;
 540
 541	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 542			       ocfs2_validate_dir_block);
 543	if (ret) {
 544		mlog_errno(ret);
 545		goto out;
 546	}
 547
 548	if (ocfs2_supports_dir_trailer(dir)) {
 549		ret = ocfs2_check_dir_trailer(dir, tmp);
 550		if (ret) {
 551			if (!*bh)
 552				brelse(tmp);
 553			mlog_errno(ret);
 554			goto out;
 555		}
 556	}
 557
 558	if (!ret && !*bh)
 559		*bh = tmp;
 560out:
 561	return ret;
 562}
 563
 564static int ocfs2_validate_dx_root(struct super_block *sb,
 565				  struct buffer_head *bh)
 566{
 567	int ret;
 568	struct ocfs2_dx_root_block *dx_root;
 569
 570	BUG_ON(!buffer_uptodate(bh));
 571
 572	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 573
 574	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 575	if (ret) {
 576		mlog(ML_ERROR,
 577		     "Checksum failed for dir index root block %llu\n",
 578		     (unsigned long long)bh->b_blocknr);
 579		return ret;
 580	}
 581
 582	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 583		ret = ocfs2_error(sb,
 584				  "Dir Index Root # %llu has bad signature %.*s\n",
 585				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 586				  7, dx_root->dr_signature);
 587	}
 588
 589	return ret;
 590}
 591
 592static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 593			      struct buffer_head **dx_root_bh)
 594{
 595	int ret;
 596	u64 blkno = le64_to_cpu(di->i_dx_root);
 597	struct buffer_head *tmp = *dx_root_bh;
 598
 599	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 600			       ocfs2_validate_dx_root);
 601
 602	/* If ocfs2_read_block() got us a new bh, pass it up. */
 603	if (!ret && !*dx_root_bh)
 604		*dx_root_bh = tmp;
 605
 606	return ret;
 607}
 608
 609static int ocfs2_validate_dx_leaf(struct super_block *sb,
 610				  struct buffer_head *bh)
 611{
 612	int ret;
 613	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 614
 615	BUG_ON(!buffer_uptodate(bh));
 616
 617	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 618	if (ret) {
 619		mlog(ML_ERROR,
 620		     "Checksum failed for dir index leaf block %llu\n",
 621		     (unsigned long long)bh->b_blocknr);
 622		return ret;
 623	}
 624
 625	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 626		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 627				  7, dx_leaf->dl_signature);
 628	}
 629
 630	return ret;
 631}
 632
 633static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 634			      struct buffer_head **dx_leaf_bh)
 635{
 636	int ret;
 637	struct buffer_head *tmp = *dx_leaf_bh;
 638
 639	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 640			       ocfs2_validate_dx_leaf);
 641
 642	/* If ocfs2_read_block() got us a new bh, pass it up. */
 643	if (!ret && !*dx_leaf_bh)
 644		*dx_leaf_bh = tmp;
 645
 646	return ret;
 647}
 648
 649/*
 650 * Read a series of dx_leaf blocks. This expects all buffer_head
 651 * pointers to be NULL on function entry.
 652 */
 653static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 654				struct buffer_head **dx_leaf_bhs)
 655{
 656	int ret;
 657
 658	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 659				ocfs2_validate_dx_leaf);
 660	if (ret)
 661		mlog_errno(ret);
 662
 663	return ret;
 664}
 665
 666static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 667					       struct inode *dir,
 668					       struct ocfs2_dir_entry **res_dir)
 669{
 670	struct super_block *sb;
 671	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 672	struct buffer_head *bh, *ret = NULL;
 673	unsigned long start, block, b;
 674	int ra_max = 0;		/* Number of bh's in the readahead
 675				   buffer, bh_use[] */
 676	int ra_ptr = 0;		/* Current index into readahead
 677				   buffer */
 678	int num = 0;
 679	int nblocks, i, err;
 680
 681	sb = dir->i_sb;
 682
 683	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 684	start = OCFS2_I(dir)->ip_dir_start_lookup;
 685	if (start >= nblocks)
 686		start = 0;
 687	block = start;
 688
 689restart:
 690	do {
 691		/*
 692		 * We deal with the read-ahead logic here.
 693		 */
 694		if (ra_ptr >= ra_max) {
 695			/* Refill the readahead buffer */
 696			ra_ptr = 0;
 697			b = block;
 698			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 699				/*
 700				 * Terminate if we reach the end of the
 701				 * directory and must wrap, or if our
 702				 * search has finished at this block.
 703				 */
 704				if (b >= nblocks || (num && block == start)) {
 705					bh_use[ra_max] = NULL;
 706					break;
 707				}
 708				num++;
 709
 710				bh = NULL;
 711				err = ocfs2_read_dir_block(dir, b++, &bh,
 712							   OCFS2_BH_READAHEAD);
 713				bh_use[ra_max] = bh;
 714			}
 715		}
 716		if ((bh = bh_use[ra_ptr++]) == NULL)
 717			goto next;
 718		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 719			/* read error, skip block & hope for the best.
 720			 * ocfs2_read_dir_block() has released the bh. */
 721			mlog(ML_ERROR, "reading directory %llu, "
 722				    "offset %lu\n",
 723				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 724				    block);
 725			goto next;
 726		}
 727		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 728					  block << sb->s_blocksize_bits,
 729					  bh->b_data, sb->s_blocksize,
 730					  res_dir);
 731		if (i == 1) {
 732			OCFS2_I(dir)->ip_dir_start_lookup = block;
 733			ret = bh;
 734			goto cleanup_and_exit;
 735		} else {
 736			brelse(bh);
 737			if (i < 0)
 738				goto cleanup_and_exit;
 739		}
 740	next:
 741		if (++block >= nblocks)
 742			block = 0;
 743	} while (block != start);
 744
 745	/*
 746	 * If the directory has grown while we were searching, then
 747	 * search the last part of the directory before giving up.
 748	 */
 749	block = nblocks;
 750	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 751	if (block < nblocks) {
 752		start = 0;
 753		goto restart;
 754	}
 755
 756cleanup_and_exit:
 757	/* Clean up the read-ahead blocks */
 758	for (; ra_ptr < ra_max; ra_ptr++)
 759		brelse(bh_use[ra_ptr]);
 760
 761	trace_ocfs2_find_entry_el(ret);
 762	return ret;
 763}
 764
 765static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 766				   struct ocfs2_extent_list *el,
 767				   u32 major_hash,
 768				   u32 *ret_cpos,
 769				   u64 *ret_phys_blkno,
 770				   unsigned int *ret_clen)
 771{
 772	int ret = 0, i, found;
 773	struct buffer_head *eb_bh = NULL;
 774	struct ocfs2_extent_block *eb;
 775	struct ocfs2_extent_rec *rec = NULL;
 776
 777	if (el->l_tree_depth) {
 778		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 779				      &eb_bh);
 780		if (ret) {
 781			mlog_errno(ret);
 782			goto out;
 783		}
 784
 785		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 786		el = &eb->h_list;
 787
 788		if (el->l_tree_depth) {
 789			ret = ocfs2_error(inode->i_sb,
 790					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 791					  inode->i_ino,
 792					  (unsigned long long)eb_bh->b_blocknr);
 793			goto out;
 794		}
 795	}
 796
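	/*
	 * Extent records are sorted by starting hash value, so scan
	 * backwards from the last used record for the one that covers
	 * major_hash.
	 */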
 797	found = 0;
 798	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 799		rec = &el->l_recs[i];
 800
 801		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 802			found = 1;
 803			break;
 804		}
 805	}
 806
 807	if (!found) {
 808		ret = ocfs2_error(inode->i_sb,
 809				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 810				  inode->i_ino,
 811				  le32_to_cpu(rec->e_cpos),
 812				  ocfs2_rec_clusters(el, rec));
 813		goto out;
 814	}
 815
 816	if (ret_phys_blkno)
 817		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 818	if (ret_cpos)
 819		*ret_cpos = le32_to_cpu(rec->e_cpos);
 820	if (ret_clen)
 821		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 822
 823out:
 824	brelse(eb_bh);
 825	return ret;
 826}
 827
 828/*
 829 * Returns the block index, from the start of the cluster to which
 830 * this hash belongs.
 831 */
 832static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 833						   u32 minor_hash)
 834{
 835	return minor_hash & osb->osb_dx_mask;
 836}
 837
 838static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 839					  struct ocfs2_dx_hinfo *hinfo)
 840{
 841	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 842}
 843
 844static int ocfs2_dx_dir_lookup(struct inode *inode,
 845			       struct ocfs2_extent_list *el,
 846			       struct ocfs2_dx_hinfo *hinfo,
 847			       u32 *ret_cpos,
 848			       u64 *ret_phys_blkno)
 849{
 850	int ret = 0;
 851	unsigned int cend, uninitialized_var(clen);
 852	u32 uninitialized_var(cpos);
 853	u64 uninitialized_var(blkno);
 854	u32 name_hash = hinfo->major_hash;
 855
 856	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 857				      &clen);
 858	if (ret) {
 859		mlog_errno(ret);
 860		goto out;
 861	}
 862
 863	cend = cpos + clen;
 864	if (name_hash >= cend) {
 865		/* We want the last cluster */
 866		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 867		cpos += clen - 1;
 868	} else {
 869		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 870						  name_hash - cpos);
 871		cpos = name_hash;
 872	}
 873
 874	/*
 875	 * We now have the cluster which should hold our entry. To
 876	 * find the exact block from the start of the cluster to
 877	 * search, we take the lower bits of the hash.
 878	 */
 879	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 880
 881	if (ret_phys_blkno)
 882		*ret_phys_blkno = blkno;
 883	if (ret_cpos)
 884		*ret_cpos = cpos;
 885
 886out:
 887
 888	return ret;
 889}
 890
 891static int ocfs2_dx_dir_search(const char *name, int namelen,
 892			       struct inode *dir,
 893			       struct ocfs2_dx_root_block *dx_root,
 894			       struct ocfs2_dir_lookup_result *res)
 895{
 896	int ret, i, found;
 897	u64 uninitialized_var(phys);
 898	struct buffer_head *dx_leaf_bh = NULL;
 899	struct ocfs2_dx_leaf *dx_leaf;
 900	struct ocfs2_dx_entry *dx_entry = NULL;
 901	struct buffer_head *dir_ent_bh = NULL;
 902	struct ocfs2_dir_entry *dir_ent = NULL;
 903	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 904	struct ocfs2_extent_list *dr_el;
 905	struct ocfs2_dx_entry_list *entry_list;
 906
 907	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 908
 909	if (ocfs2_dx_root_inline(dx_root)) {
 910		entry_list = &dx_root->dr_entries;
 911		goto search;
 912	}
 913
 914	dr_el = &dx_root->dr_list;
 915
 916	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 917	if (ret) {
 918		mlog_errno(ret);
 919		goto out;
 920	}
 921
 922	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 923				  namelen, name, hinfo->major_hash,
 924				  hinfo->minor_hash, (unsigned long long)phys);
 925
 926	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 927	if (ret) {
 928		mlog_errno(ret);
 929		goto out;
 930	}
 931
 932	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 933
 934	trace_ocfs2_dx_dir_search_leaf_info(
 935			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 936			le16_to_cpu(dx_leaf->dl_list.de_count));
 937
 938	entry_list = &dx_leaf->dl_list;
 939
 940search:
 941	/*
 942	 * Empty leaf is legal, so no need to check for that.
 943	 */
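	/*
	 * Several names can share a hash, so each candidate entry still
	 * has to be verified against the actual name stored in its
	 * unindexed block.
	 */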
 944	found = 0;
 945	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 946		dx_entry = &entry_list->de_entries[i];
 947
 948		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 949		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 950			continue;
 951
 952		/*
 953		 * Search unindexed leaf block now. We're not
 954		 * guaranteed to find anything.
 955		 */
 956		ret = ocfs2_read_dir_block_direct(dir,
 957					  le64_to_cpu(dx_entry->dx_dirent_blk),
 958					  &dir_ent_bh);
 959		if (ret) {
 960			mlog_errno(ret);
 961			goto out;
 962		}
 963
 964		/*
 965		 * XXX: We should check the unindexed block here,
 966		 * before using it.
 967		 */
 968
 969		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 970					      0, dir_ent_bh->b_data,
 971					      dir->i_sb->s_blocksize, &dir_ent);
 972		if (found == 1)
 973			break;
 974
 975		if (found == -1) {
 976			/* This means we found a bad directory entry. */
 977			ret = -EIO;
 978			mlog_errno(ret);
 979			goto out;
 980		}
 981
 982		brelse(dir_ent_bh);
 983		dir_ent_bh = NULL;
 984	}
 985
 986	if (found <= 0) {
 987		ret = -ENOENT;
 988		goto out;
 989	}
 990
 991	res->dl_leaf_bh = dir_ent_bh;
 992	res->dl_entry = dir_ent;
 993	res->dl_dx_leaf_bh = dx_leaf_bh;
 994	res->dl_dx_entry = dx_entry;
 995
 996	ret = 0;
 997out:
 998	if (ret) {
 999		brelse(dx_leaf_bh);
1000		brelse(dir_ent_bh);
1001	}
1002	return ret;
1003}
1004
1005static int ocfs2_find_entry_dx(const char *name, int namelen,
1006			       struct inode *dir,
1007			       struct ocfs2_dir_lookup_result *lookup)
1008{
1009	int ret;
1010	struct buffer_head *di_bh = NULL;
1011	struct ocfs2_dinode *di;
1012	struct buffer_head *dx_root_bh = NULL;
1013	struct ocfs2_dx_root_block *dx_root;
1014
1015	ret = ocfs2_read_inode_block(dir, &di_bh);
1016	if (ret) {
1017		mlog_errno(ret);
1018		goto out;
1019	}
1020
1021	di = (struct ocfs2_dinode *)di_bh->b_data;
1022
1023	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1024	if (ret) {
1025		mlog_errno(ret);
1026		goto out;
1027	}
1028	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1029
1030	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1031	if (ret) {
1032		if (ret != -ENOENT)
1033			mlog_errno(ret);
1034		goto out;
1035	}
1036
1037	lookup->dl_dx_root_bh = dx_root_bh;
1038	dx_root_bh = NULL;
1039out:
1040	brelse(di_bh);
1041	brelse(dx_root_bh);
1042	return ret;
1043}
1044
1045/*
1046 * Try to find an entry of the provided name within 'dir'.
1047 *
1048 * If nothing was found, -ENOENT is returned. Otherwise, zero is
1049 * returned and the struct 'res' will contain information useful to
1050 * other directory manipulation functions.
1051 *
1052 * Caller can NOT assume anything about the contents of the
1053 * buffer_heads - they are passed back only so that they can be passed
1054 * into any one of the manipulation functions (add entry, delete
1055 * entry, etc). As an example, bh in the extent directory case is a
1056 * data block, in the inline-data case it actually points to an inode,
1057 * in the indexed directory case, multiple buffers are involved.
1058 */
1059int ocfs2_find_entry(const char *name, int namelen,
1060		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1061{
1062	struct buffer_head *bh;
1063	struct ocfs2_dir_entry *res_dir = NULL;
1064
1065	if (ocfs2_dir_indexed(dir))
1066		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1067
1068	/*
1069	 * The unindexed dir code only uses part of the lookup
1070	 * structure, so there's no reason to push it down further
1071	 * than this.
1072	 */
1073	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1074		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1075	else
1076		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
1077
1078	if (bh == NULL)
1079		return -ENOENT;
1080
1081	lookup->dl_leaf_bh = bh;
1082	lookup->dl_entry = res_dir;
1083	return 0;
1084}
1085
1086/*
1087 * Update inode number and type of a previously found directory entry.
1088 */
1089int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1090		       struct ocfs2_dir_lookup_result *res,
1091		       struct inode *new_entry_inode)
1092{
1093	int ret;
1094	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1095	struct ocfs2_dir_entry *de = res->dl_entry;
1096	struct buffer_head *de_bh = res->dl_leaf_bh;
1097
1098	/*
1099	 * The same code works fine for both inline-data and extent
1100	 * based directories, so no need to split this up.  The only
1101	 * difference is the journal_access function.
1102	 */
1103
1104	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1105		access = ocfs2_journal_access_di;
1106
1107	ret = access(handle, INODE_CACHE(dir), de_bh,
1108		     OCFS2_JOURNAL_ACCESS_WRITE);
1109	if (ret) {
1110		mlog_errno(ret);
1111		goto out;
1112	}
1113
1114	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1115	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1116
1117	ocfs2_journal_dirty(handle, de_bh);
1118
1119out:
1120	return ret;
1121}
1122
1123/*
1124 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1125 * previous entry
1126 */
1127static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1128				struct ocfs2_dir_entry *de_del,
1129				struct buffer_head *bh, char *first_de,
1130				unsigned int bytes)
1131{
1132	struct ocfs2_dir_entry *de, *pde;
1133	int i, status = -ENOENT;
1134	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1135
1136	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1137		access = ocfs2_journal_access_di;
1138
1139	i = 0;
1140	pde = NULL;
1141	de = (struct ocfs2_dir_entry *) first_de;
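	/*
	 * Walk the dirents in this block; when de_del is found, clear
	 * its inode field and, if it has a predecessor, fold its rec_len
	 * into that predecessor's.
	 */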
1142	while (i < bytes) {
1143		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
1144			status = -EIO;
1145			mlog_errno(status);
1146			goto bail;
1147		}
1148		if (de == de_del)  {
1149			status = access(handle, INODE_CACHE(dir), bh,
1150					OCFS2_JOURNAL_ACCESS_WRITE);
1151			if (status < 0) {
1152				status = -EIO;
1153				mlog_errno(status);
1154				goto bail;
1155			}
1156			if (pde)
1157				le16_add_cpu(&pde->rec_len,
1158						le16_to_cpu(de->rec_len));
1159			de->inode = 0;
1160			inode_inc_iversion(dir);
1161			ocfs2_journal_dirty(handle, bh);
1162			goto bail;
1163		}
1164		i += le16_to_cpu(de->rec_len);
1165		pde = de;
1166		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1167	}
1168bail:
1169	return status;
1170}
1171
1172static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1173{
1174	unsigned int hole;
1175
1176	if (le64_to_cpu(de->inode) == 0)
1177		hole = le16_to_cpu(de->rec_len);
1178	else
1179		hole = le16_to_cpu(de->rec_len) -
1180			OCFS2_DIR_REC_LEN(de->name_len);
1181
1182	return hole;
1183}
1184
1185static int ocfs2_find_max_rec_len(struct super_block *sb,
1186				  struct buffer_head *dirblock_bh)
1187{
1188	int size, this_hole, largest_hole = 0;
1189	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1190	struct ocfs2_dir_entry *de;
1191
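	/*
	 * Scan every dirent in the block (stopping at the trailer) and
	 * return the largest reusable hole, or 0 if none can hold even a
	 * minimal record.
	 */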
1192	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1193	size = ocfs2_dir_trailer_blk_off(sb);
1194	limit = start + size;
1195	de_buf = start;
1196	de = (struct ocfs2_dir_entry *)de_buf;
1197	do {
1198		if (de_buf != trailer) {
1199			this_hole = ocfs2_figure_dirent_hole(de);
1200			if (this_hole > largest_hole)
1201				largest_hole = this_hole;
1202		}
1203
1204		de_buf += le16_to_cpu(de->rec_len);
1205		de = (struct ocfs2_dir_entry *)de_buf;
1206	} while (de_buf < limit);
1207
1208	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1209		return largest_hole;
1210	return 0;
1211}
1212
1213static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1214				       int index)
1215{
1216	int num_used = le16_to_cpu(entry_list->de_num_used);
1217
1218	if (num_used == 1 || index == (num_used - 1))
1219		goto clear;
1220
1221	memmove(&entry_list->de_entries[index],
1222		&entry_list->de_entries[index + 1],
1223		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1224clear:
1225	num_used--;
1226	memset(&entry_list->de_entries[num_used], 0,
1227	       sizeof(struct ocfs2_dx_entry));
1228	entry_list->de_num_used = cpu_to_le16(num_used);
1229}
1230
1231static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1232				 struct ocfs2_dir_lookup_result *lookup)
1233{
1234	int ret, index, max_rec_len, add_to_free_list = 0;
1235	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1236	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1237	struct ocfs2_dx_leaf *dx_leaf;
1238	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1239	struct ocfs2_dir_block_trailer *trailer;
1240	struct ocfs2_dx_root_block *dx_root;
1241	struct ocfs2_dx_entry_list *entry_list;
1242
1243	/*
1244	 * This function gets a bit messy because we might have to
1245	 * modify the root block, regardless of whether the indexed
1246	 * entries are stored inline.
1247	 */
1248
1249	/*
1250	 * *Only* set 'entry_list' here, based on where we're looking
1251	 * for the indexed entries. Later, we might still want to
1252	 * journal both blocks, based on free list state.
1253	 */
1254	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1255	if (ocfs2_dx_root_inline(dx_root)) {
1256		entry_list = &dx_root->dr_entries;
1257	} else {
1258		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1259		entry_list = &dx_leaf->dl_list;
1260	}
1261
1262	/* Neither of these is a sign of disk corruption - that should have
1263	 * been caught by lookup, before we got here. */
1264	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1265	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1266
1267	index = (char *)dx_entry - (char *)entry_list->de_entries;
1268	index /= sizeof(*dx_entry);
1269
1270	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1271		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1272		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1273		     entry_list, dx_entry);
1274		return -EIO;
1275	}
1276
1277	/*
1278	 * We know that removal of this dirent will leave enough room
1279	 * for a new one, so add this block to the free list if it
1280	 * isn't already there.
1281	 */
1282	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1283	if (trailer->db_free_rec_len == 0)
1284		add_to_free_list = 1;
1285
1286	/*
1287	 * Add the block holding our index into the journal before
1288	 * removing the unindexed entry. If we get an error return
1289	 * from __ocfs2_delete_entry(), then it hasn't removed the
1290	 * entry yet. Likewise, successful return means we *must*
1291	 * remove the indexed entry.
1292	 *
1293	 * We're also careful to journal the root tree block here as
1294	 * the entry count needs to be updated. Also, we might be
1295	 * adding to the start of the free list.
1296	 */
1297	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1298				      OCFS2_JOURNAL_ACCESS_WRITE);
1299	if (ret) {
1300		mlog_errno(ret);
1301		goto out;
1302	}
1303
1304	if (!ocfs2_dx_root_inline(dx_root)) {
1305		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1306					      lookup->dl_dx_leaf_bh,
1307					      OCFS2_JOURNAL_ACCESS_WRITE);
1308		if (ret) {
1309			mlog_errno(ret);
1310			goto out;
1311		}
1312	}
1313
1314	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1315				    index);
1316
1317	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1318				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1319	if (ret) {
1320		mlog_errno(ret);
1321		goto out;
1322	}
1323
1324	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1325	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1326	if (add_to_free_list) {
1327		trailer->db_free_next = dx_root->dr_free_blk;
1328		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1329		ocfs2_journal_dirty(handle, dx_root_bh);
1330	}
1331
1332	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1333	ocfs2_journal_dirty(handle, leaf_bh);
1334
1335	le32_add_cpu(&dx_root->dr_num_entries, -1);
1336	ocfs2_journal_dirty(handle, dx_root_bh);
1337
1338	ocfs2_dx_list_remove_entry(entry_list, index);
1339
1340	if (!ocfs2_dx_root_inline(dx_root))
1341		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1342
1343out:
1344	return ret;
1345}
1346
1347static inline int ocfs2_delete_entry_id(handle_t *handle,
1348					struct inode *dir,
1349					struct ocfs2_dir_entry *de_del,
1350					struct buffer_head *bh)
1351{
1352	int ret;
1353	struct buffer_head *di_bh = NULL;
1354	struct ocfs2_dinode *di;
1355	struct ocfs2_inline_data *data;
1356
1357	ret = ocfs2_read_inode_block(dir, &di_bh);
1358	if (ret) {
1359		mlog_errno(ret);
1360		goto out;
1361	}
1362
1363	di = (struct ocfs2_dinode *)di_bh->b_data;
1364	data = &di->id2.i_data;
1365
1366	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1367				   i_size_read(dir));
1368
1369	brelse(di_bh);
1370out:
1371	return ret;
1372}
1373
1374static inline int ocfs2_delete_entry_el(handle_t *handle,
1375					struct inode *dir,
1376					struct ocfs2_dir_entry *de_del,
1377					struct buffer_head *bh)
1378{
1379	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1380				    bh->b_size);
1381}
1382
1383/*
1384 * Delete a directory entry. Hide the details of directory
1385 * implementation from the caller.
1386 */
1387int ocfs2_delete_entry(handle_t *handle,
1388		       struct inode *dir,
1389		       struct ocfs2_dir_lookup_result *res)
1390{
1391	if (ocfs2_dir_indexed(dir))
1392		return ocfs2_delete_entry_dx(handle, dir, res);
1393
1394	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1395		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1396					     res->dl_leaf_bh);
1397
1398	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1399				     res->dl_leaf_bh);
1400}
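/*
 * A caller that already holds the cluster lock and a journal handle
 * would typically drive this as roughly (simplified, error handling
 * trimmed for illustration):
 *
 *	struct ocfs2_dir_lookup_result lookup = { NULL, };
 *
 *	status = ocfs2_find_entry(name, namelen, dir, &lookup);
 *	if (!status)
 *		status = ocfs2_delete_entry(handle, dir, &lookup);
 *	ocfs2_free_dir_lookup_result(&lookup);
 *
 * The lookup result carries whichever buffer heads the chosen layout
 * (inline, extent based, or indexed) needs, so the caller never has
 * to know which variant it is dealing with.
 */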
1401
1402/*
1403 * Check whether 'de' has enough room to hold an entry of
1404 * 'new_rec_len' bytes.
1405 */
1406static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1407					 unsigned int new_rec_len)
1408{
1409	unsigned int de_really_used;
1410
1411	/* Check whether this is an empty record with enough space */
1412	if (le64_to_cpu(de->inode) == 0 &&
1413	    le16_to_cpu(de->rec_len) >= new_rec_len)
1414		return 1;
1415
1416	/*
1417	 * Record might have free space at the end which we can
1418	 * use.
1419	 */
1420	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1421	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1422	    return 1;
1423
1424	return 0;
1425}
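/*
 * Worked example for the check above (numbers illustrative, assuming
 * the usual 12-byte fixed dirent header and 4-byte rounding): an
 * in-use record for a 3-character name that was left with
 * rec_len == 44 after an earlier deletion really uses
 * OCFS2_DIR_REC_LEN(3) == 16 bytes, leaving 28 bytes of slack -
 * enough for any new dirent whose rec_len is <= 28.
 */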
1426
1427static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1428					  struct ocfs2_dx_entry *dx_new_entry)
1429{
1430	int i;
1431
1432	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1433	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1434
1435	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1436}
1437
1438static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1439				       struct ocfs2_dx_hinfo *hinfo,
1440				       u64 dirent_blk)
1441{
1442	int i;
1443	struct ocfs2_dx_entry *dx_entry;
1444
1445	i = le16_to_cpu(entry_list->de_num_used);
1446	dx_entry = &entry_list->de_entries[i];
1447
1448	memset(dx_entry, 0, sizeof(*dx_entry));
1449	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1450	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1451	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1452
1453	le16_add_cpu(&entry_list->de_num_used, 1);
1454}
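/*
 * Each ocfs2_dx_entry is just (major_hash, minor_hash, dirent_blk):
 * the index maps a name hash to the unindexed block that actually
 * holds the dirent, so a lookup still has to scan that one block for
 * the matching name. Entries are simply appended here; ordering
 * within a leaf is not significant.
 */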
1455
1456static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1457				      struct ocfs2_dx_hinfo *hinfo,
1458				      u64 dirent_blk,
1459				      struct buffer_head *dx_leaf_bh)
1460{
1461	int ret;
1462	struct ocfs2_dx_leaf *dx_leaf;
1463
1464	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1465				      OCFS2_JOURNAL_ACCESS_WRITE);
1466	if (ret) {
1467		mlog_errno(ret);
1468		goto out;
1469	}
1470
1471	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1472	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1473	ocfs2_journal_dirty(handle, dx_leaf_bh);
1474
1475out:
1476	return ret;
1477}
1478
1479static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1480					struct ocfs2_dx_hinfo *hinfo,
1481					u64 dirent_blk,
1482					struct ocfs2_dx_root_block *dx_root)
1483{
1484	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1485}
1486
1487static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1488			       struct ocfs2_dir_lookup_result *lookup)
1489{
1490	int ret = 0;
1491	struct ocfs2_dx_root_block *dx_root;
1492	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1493
1494	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1495				      OCFS2_JOURNAL_ACCESS_WRITE);
1496	if (ret) {
1497		mlog_errno(ret);
1498		goto out;
1499	}
1500
1501	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1502	if (ocfs2_dx_root_inline(dx_root)) {
1503		ocfs2_dx_inline_root_insert(dir, handle,
1504					    &lookup->dl_hinfo,
1505					    lookup->dl_leaf_bh->b_blocknr,
1506					    dx_root);
1507	} else {
1508		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1509						 lookup->dl_leaf_bh->b_blocknr,
1510						 lookup->dl_dx_leaf_bh);
1511		if (ret)
1512			goto out;
1513	}
1514
1515	le32_add_cpu(&dx_root->dr_num_entries, 1);
1516	ocfs2_journal_dirty(handle, dx_root_bh);
1517
1518out:
1519	return ret;
1520}
1521
1522static void ocfs2_remove_block_from_free_list(struct inode *dir,
1523				       handle_t *handle,
1524				       struct ocfs2_dir_lookup_result *lookup)
1525{
1526	struct ocfs2_dir_block_trailer *trailer, *prev;
1527	struct ocfs2_dx_root_block *dx_root;
1528	struct buffer_head *bh;
1529
1530	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1531
1532	if (ocfs2_free_list_at_root(lookup)) {
1533		bh = lookup->dl_dx_root_bh;
1534		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1535		dx_root->dr_free_blk = trailer->db_free_next;
1536	} else {
1537		bh = lookup->dl_prev_leaf_bh;
1538		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1539		prev->db_free_next = trailer->db_free_next;
1540	}
1541
1542	trailer->db_free_rec_len = cpu_to_le16(0);
1543	trailer->db_free_next = cpu_to_le64(0);
1544
1545	ocfs2_journal_dirty(handle, bh);
1546	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1547}
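/*
 * The per-directory free list is a singly linked list of unindexed
 * dir blocks threaded through the block trailers, with its head in
 * the dx root:
 *
 *	dx_root->dr_free_blk -> blkA(trailer.db_free_next) -> blkB -> 0
 *
 * Unlinking above is the usual "point the predecessor at our
 * successor" operation; the predecessor is either the dx root (head
 * removal) or the previous block's trailer.
 */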
1548
1549/*
1550 * This expects that a journal write has been reserved on
1551 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1552 */
1553static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1554				   struct ocfs2_dir_lookup_result *lookup)
1555{
1556	int max_rec_len;
1557	struct ocfs2_dir_block_trailer *trailer;
1558
1559	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1560	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1561	if (max_rec_len) {
1562		/*
1563		 * There's still room in this block, so no need to remove it
1564		 * from the free list. In this case, we just want to update
1565		 * the rec len accounting.
1566		 */
1567		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1568		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1569		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1570	} else {
1571		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1572	}
1573}
1574
1575/* We don't always have a dentry for what we want to add, so callers
1576 * like the orphan dir code can use this instead.
1577 *
1578 * The lookup context must have been filled from
1579 * ocfs2_prepare_dir_for_insert.
1580 */
1581int __ocfs2_add_entry(handle_t *handle,
1582		      struct inode *dir,
1583		      const char *name, int namelen,
1584		      struct inode *inode, u64 blkno,
1585		      struct buffer_head *parent_fe_bh,
1586		      struct ocfs2_dir_lookup_result *lookup)
1587{
1588	unsigned long offset;
1589	unsigned short rec_len;
1590	struct ocfs2_dir_entry *de, *de1;
1591	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1592	struct super_block *sb = dir->i_sb;
1593	int retval;
1594	unsigned int size = sb->s_blocksize;
1595	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1596	char *data_start = insert_bh->b_data;
1597
1598	if (!namelen)
1599		return -EINVAL;
1600
1601	if (ocfs2_dir_indexed(dir)) {
1602		struct buffer_head *bh;
1603
1604		/*
1605		 * An indexed dir may require that we update the free space
1606		 * list. Reserve a write to the previous node in the list so
1607		 * that we don't fail later.
1608		 *
1609		 * XXX: This can be either a dx_root_block, or an unindexed
1610		 * directory tree leaf block.
1611		 */
1612		if (ocfs2_free_list_at_root(lookup)) {
1613			bh = lookup->dl_dx_root_bh;
1614			retval = ocfs2_journal_access_dr(handle,
1615						 INODE_CACHE(dir), bh,
1616						 OCFS2_JOURNAL_ACCESS_WRITE);
1617		} else {
1618			bh = lookup->dl_prev_leaf_bh;
1619			retval = ocfs2_journal_access_db(handle,
1620						 INODE_CACHE(dir), bh,
1621						 OCFS2_JOURNAL_ACCESS_WRITE);
1622		}
1623		if (retval) {
1624			mlog_errno(retval);
1625			return retval;
1626		}
1627	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1628		data_start = di->id2.i_data.id_data;
1629		size = i_size_read(dir);
1630
1631		BUG_ON(insert_bh != parent_fe_bh);
1632	}
1633
1634	rec_len = OCFS2_DIR_REC_LEN(namelen);
1635	offset = 0;
1636	de = (struct ocfs2_dir_entry *) data_start;
1637	while (1) {
1638		BUG_ON((char *)de >= (size + data_start));
1639
1640		/* These checks should have already been passed by the
1641		 * prepare function, but we keep them here as an
1642		 * extra safety net. */
1643		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
1644			retval = -ENOENT;
1645			goto bail;
1646		}
1647		if (ocfs2_match(namelen, name, de)) {
1648			retval = -EEXIST;
1649			goto bail;
1650		}
1651
1652		/* We're guaranteed to have enough space, so we
1653		 * should never hit the trailer here. */
1654		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1655				"Hit dir trailer trying to insert %.*s "
1656			        "(namelen %d) into directory %llu.  "
1657				"offset is %lu, trailer offset is %d\n",
1658				namelen, name, namelen,
1659				(unsigned long long)parent_fe_bh->b_blocknr,
1660				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1661
1662		if (ocfs2_dirent_would_fit(de, rec_len)) {
1663			dir->i_mtime = dir->i_ctime = current_time(dir);
1664			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1665			if (retval < 0) {
1666				mlog_errno(retval);
1667				goto bail;
1668			}
1669
1670			if (insert_bh == parent_fe_bh)
1671				retval = ocfs2_journal_access_di(handle,
1672								 INODE_CACHE(dir),
1673								 insert_bh,
1674								 OCFS2_JOURNAL_ACCESS_WRITE);
1675			else {
1676				retval = ocfs2_journal_access_db(handle,
1677								 INODE_CACHE(dir),
1678								 insert_bh,
1679					      OCFS2_JOURNAL_ACCESS_WRITE);
1680
1681				if (!retval && ocfs2_dir_indexed(dir))
1682					retval = ocfs2_dx_dir_insert(dir,
1683								handle,
1684								lookup);
1685			}
1686
1687			if (retval) {
1688				mlog_errno(retval);
1689				goto bail;
1690			}
1691
1692			/* By now the buffer is marked for journaling */
1693			offset += le16_to_cpu(de->rec_len);
1694			if (le64_to_cpu(de->inode)) {
1695				de1 = (struct ocfs2_dir_entry *)((char *) de +
1696					OCFS2_DIR_REC_LEN(de->name_len));
1697				de1->rec_len =
1698					cpu_to_le16(le16_to_cpu(de->rec_len) -
1699					OCFS2_DIR_REC_LEN(de->name_len));
1700				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1701				de = de1;
1702			}
1703			de->file_type = FT_UNKNOWN;
1704			if (blkno) {
1705				de->inode = cpu_to_le64(blkno);
1706				ocfs2_set_de_type(de, inode->i_mode);
1707			} else
1708				de->inode = 0;
1709			de->name_len = namelen;
1710			memcpy(de->name, name, namelen);
1711
1712			if (ocfs2_dir_indexed(dir))
1713				ocfs2_recalc_free_list(dir, handle, lookup);
1714
1715			inode_inc_iversion(dir);
1716			ocfs2_journal_dirty(handle, insert_bh);
1717			retval = 0;
1718			goto bail;
1719		}
1720
1721		offset += le16_to_cpu(de->rec_len);
1722		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1723	}
1724
1725	/* The trailer assert above should prevent us
1726	 * from ever getting here. */
1727	retval = -ENOSPC;
1728bail:
1729	if (retval)
1730		mlog_errno(retval);
1731
1732	return retval;
1733}
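/*
 * Sketch of the in-block insert performed above when we land on an
 * in-use record with slack (sizes illustrative only):
 *
 *	before:	[ "foo" rec_len=40                   ]
 *	after:	[ "foo" rec_len=16 ][ new rec_len=24 ]
 *
 * The existing record is trimmed down to OCFS2_DIR_REC_LEN(name_len)
 * and the new dirent inherits the remainder. If instead we land on an
 * unused record (inode == 0), it is simply overwritten in place.
 */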
1734
1735static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1736				    u64 *f_version,
1737				    struct dir_context *ctx)
1738{
1739	int ret, i;
1740	unsigned long offset = ctx->pos;
1741	struct buffer_head *di_bh = NULL;
1742	struct ocfs2_dinode *di;
1743	struct ocfs2_inline_data *data;
1744	struct ocfs2_dir_entry *de;
1745
1746	ret = ocfs2_read_inode_block(inode, &di_bh);
1747	if (ret) {
1748		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1749		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1750		goto out;
1751	}
1752
1753	di = (struct ocfs2_dinode *)di_bh->b_data;
1754	data = &di->id2.i_data;
1755
1756	while (ctx->pos < i_size_read(inode)) {
1757		/* If the dir block has changed since the last call to
1758		 * readdir(2), then we might be pointing to an invalid
1759		 * dirent right now.  Scan from the start of the block
1760		 * to make sure. */
1761		if (!inode_eq_iversion(inode, *f_version)) {
1762			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1763				de = (struct ocfs2_dir_entry *)
1764					(data->id_data + i);
1765				/* It's too expensive to do a full
1766				 * dirent test each time round this
1767				 * loop, but we do have to test at
1768				 * least that it is non-zero.  A
1769				 * failure will be detected in the
1770				 * dirent test below. */
1771				if (le16_to_cpu(de->rec_len) <
1772				    OCFS2_DIR_REC_LEN(1))
1773					break;
1774				i += le16_to_cpu(de->rec_len);
1775			}
1776			ctx->pos = offset = i;
1777			*f_version = inode_query_iversion(inode);
1778		}
1779
1780		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1781		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
1782			/* On error, skip the f_pos to the end. */
1783			ctx->pos = i_size_read(inode);
1784			break;
1785		}
1786		offset += le16_to_cpu(de->rec_len);
1787		if (le64_to_cpu(de->inode)) {
1788			if (!dir_emit(ctx, de->name, de->name_len,
1789				      le64_to_cpu(de->inode),
1790				      fs_ftype_to_dtype(de->file_type)))
1791				goto out;
1792		}
1793		ctx->pos += le16_to_cpu(de->rec_len);
1794	}
1795out:
1796	brelse(di_bh);
1797	return 0;
1798}
1799
1800/*
1801 * NOTE: This function can be called against unindexed directories,
1802 * and indexed ones.
1803 */
1804static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1805				    u64 *f_version,
1806				    struct dir_context *ctx,
1807				    bool persist)
1808{
1809	unsigned long offset, blk, last_ra_blk = 0;
1810	int i;
1811	struct buffer_head * bh, * tmp;
1812	struct ocfs2_dir_entry * de;
1813	struct super_block * sb = inode->i_sb;
1814	unsigned int ra_sectors = 16;
1815	int stored = 0;
1816
1817	bh = NULL;
1818
1819	offset = ctx->pos & (sb->s_blocksize - 1);
1820
1821	while (ctx->pos < i_size_read(inode)) {
1822		blk = ctx->pos >> sb->s_blocksize_bits;
1823		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1824			/* Skip the corrupt dirblock and keep trying */
1825			ctx->pos += sb->s_blocksize - offset;
1826			continue;
1827		}
1828
1829		/* The idea here is to begin with 8k read-ahead and to stay
1830		 * 4k ahead of our current position.
1831		 *
1832		 * TODO: Use the pagecache for this. We just need to
1833		 * make sure it's cluster-safe... */
1834		if (!last_ra_blk
1835		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1836			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1837			     i > 0; i--) {
1838				tmp = NULL;
1839				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1840							  OCFS2_BH_READAHEAD))
1841					brelse(tmp);
1842			}
1843			last_ra_blk = blk;
1844			ra_sectors = 8;
1845		}
1846
1847		/* If the dir block has changed since the last call to
1848		 * readdir(2), then we might be pointing to an invalid
1849		 * dirent right now.  Scan from the start of the block
1850		 * to make sure. */
1851		if (!inode_eq_iversion(inode, *f_version)) {
1852			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1853				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1854				/* It's too expensive to do a full
1855				 * dirent test each time round this
1856				 * loop, but we do have to test at
1857				 * least that it is non-zero.  A
1858				 * failure will be detected in the
1859				 * dirent test below. */
1860				if (le16_to_cpu(de->rec_len) <
1861				    OCFS2_DIR_REC_LEN(1))
1862					break;
1863				i += le16_to_cpu(de->rec_len);
1864			}
1865			offset = i;
1866			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1867				| offset;
1868			*f_version = inode_query_iversion(inode);
1869		}
1870
1871		while (ctx->pos < i_size_read(inode)
1872		       && offset < sb->s_blocksize) {
1873			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1874			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
1875				/* On error, skip the f_pos to the
1876				   next block. */
1877				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1878				break;
1879			}
1880			if (le64_to_cpu(de->inode)) {
1881				if (!dir_emit(ctx, de->name,
1882						de->name_len,
1883						le64_to_cpu(de->inode),
1884					fs_ftype_to_dtype(de->file_type))) {
1885					brelse(bh);
1886					return 0;
1887				}
1888				stored++;
1889			}
1890			offset += le16_to_cpu(de->rec_len);
1891			ctx->pos += le16_to_cpu(de->rec_len);
1892		}
1893		offset = 0;
1894		brelse(bh);
1895		bh = NULL;
1896		if (!persist && stored)
1897			break;
1898	}
1899	return 0;
1900}
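/*
 * Note on the f_pos encoding used above: for extent based directories
 * ctx->pos is simply the byte offset into the directory, so the block
 * number is ctx->pos >> s_blocksize_bits and the offset within that
 * block is ctx->pos & (s_blocksize - 1). The iversion check lets us
 * re-derive a safe in-block offset after a concurrent modification
 * rather than trusting a possibly stale one.
 */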
1901
1902static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1903				 struct dir_context *ctx,
1904				 bool persist)
1905{
1906	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1907		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1908	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1909}
1910
1911/*
1912 * This is intended to be called from inside other kernel functions,
1913 * so we fake some arguments.
1914 */
1915int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1916{
1917	u64 version = inode_query_iversion(inode);
1918	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1919	return 0;
1920}
1921
1922/*
1923 * ocfs2_readdir()
1924 */
1926int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1927{
1928	int error = 0;
1929	struct inode *inode = file_inode(file);
1930	int lock_level = 0;
1931
1932	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1933
1934	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
1935	if (lock_level && error >= 0) {
1936	/* We release the EX lock which was used to update atime
1937	 * and take a PR lock again to reduce contention
1938	 * on commonly accessed directories. */
1939		ocfs2_inode_unlock(inode, 1);
1940		lock_level = 0;
1941		error = ocfs2_inode_lock(inode, NULL, 0);
1942	}
1943	if (error < 0) {
1944		if (error != -ENOENT)
1945			mlog_errno(error);
1946		/* We haven't emitted any entries yet, so propagate the error. */
1947		goto bail_nolock;
1948	}
1949
1950	error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
1951
1952	ocfs2_inode_unlock(inode, lock_level);
1953	if (error)
1954		mlog_errno(error);
1955
1956bail_nolock:
1957
1958	return error;
1959}
1960
1961/*
1962 * NOTE: This should always be called with the parent directory's i_mutex held.
1963 */
1964int ocfs2_find_files_on_disk(const char *name,
1965			     int namelen,
1966			     u64 *blkno,
1967			     struct inode *inode,
1968			     struct ocfs2_dir_lookup_result *lookup)
1969{
1970	int status = -ENOENT;
1971
1972	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1973				(unsigned long long)OCFS2_I(inode)->ip_blkno);
1974
1975	status = ocfs2_find_entry(name, namelen, inode, lookup);
1976	if (status)
1977		goto leave;
1978
1979	*blkno = le64_to_cpu(lookup->dl_entry->inode);
1980
1981	status = 0;
1982leave:
1983
1984	return status;
1985}
1986
1987/*
1988 * Convenience function for callers which just want the block number
1989 * mapped to a name and don't require the full dirent info, etc.
1990 */
1991int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
1992			       int namelen, u64 *blkno)
1993{
1994	int ret;
1995	struct ocfs2_dir_lookup_result lookup = { NULL, };
1996
1997	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
1998	ocfs2_free_dir_lookup_result(&lookup);
1999
2000	return ret;
2001}
2002
2003/* Check for a name within a directory.
2004 *
2005 * Return 0 if the name does not exist
2006 * Return -EEXIST if the directory contains the name
2007 *
2008 * Callers should have i_mutex + a cluster lock on dir
2009 */
2010int ocfs2_check_dir_for_entry(struct inode *dir,
2011			      const char *name,
2012			      int namelen)
2013{
2014	int ret = 0;
2015	struct ocfs2_dir_lookup_result lookup = { NULL, };
2016
2017	trace_ocfs2_check_dir_for_entry(
2018		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2019
2020	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
2021		ret = -EEXIST;
2022		mlog_errno(ret);
2023	}
2024
2025	ocfs2_free_dir_lookup_result(&lookup);
2026
2027	return ret;
2028}
2029
2030struct ocfs2_empty_dir_priv {
2031	struct dir_context ctx;
2032	unsigned seen_dot;
2033	unsigned seen_dot_dot;
2034	unsigned seen_other;
2035	unsigned dx_dir;
2036};
2037static int ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2038				   int name_len, loff_t pos, u64 ino,
2039				   unsigned type)
2040{
2041	struct ocfs2_empty_dir_priv *p =
2042		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2043
2044	/*
2045	 * Check the positions of "." and ".." records to be sure
2046	 * they're in the correct place.
2047	 *
2048	 * Indexed directories don't need to proceed past the first
2049	 * two entries, so we end the scan after seeing '..'. Despite
2050	 * that, we allow the scan to proceed in the event that we
2051	 * have a corrupted indexed directory (no dot or dot dot
2052	 * entries). This allows us to double check for existing
2053	 * entries which might not have been found in the index.
2054	 */
2055	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2056		p->seen_dot = 1;
2057		return 0;
2058	}
2059
2060	if (name_len == 2 && !strncmp("..", name, 2) &&
2061	    pos == OCFS2_DIR_REC_LEN(1)) {
2062		p->seen_dot_dot = 1;
2063
2064		if (p->dx_dir && p->seen_dot)
2065			return 1;
2066
2067		return 0;
2068	}
2069
2070	p->seen_other = 1;
2071	return 1;
2072}
2073
2074static int ocfs2_empty_dir_dx(struct inode *inode,
2075			      struct ocfs2_empty_dir_priv *priv)
2076{
2077	int ret;
2078	struct buffer_head *di_bh = NULL;
2079	struct buffer_head *dx_root_bh = NULL;
2080	struct ocfs2_dinode *di;
2081	struct ocfs2_dx_root_block *dx_root;
2082
2083	priv->dx_dir = 1;
2084
2085	ret = ocfs2_read_inode_block(inode, &di_bh);
2086	if (ret) {
2087		mlog_errno(ret);
2088		goto out;
2089	}
2090	di = (struct ocfs2_dinode *)di_bh->b_data;
2091
2092	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2093	if (ret) {
2094		mlog_errno(ret);
2095		goto out;
2096	}
2097	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2098
2099	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2100		priv->seen_other = 1;
2101
2102out:
2103	brelse(di_bh);
2104	brelse(dx_root_bh);
2105	return ret;
2106}
2107
2108/*
2109 * routine to check that the specified directory is empty (for rmdir)
2110 *
2111 * Returns 1 if dir is empty, zero otherwise.
2112 *
2113 * XXX: This is a performance problem for unindexed directories.
2114 */
2115int ocfs2_empty_dir(struct inode *inode)
2116{
2117	int ret;
2118	struct ocfs2_empty_dir_priv priv = {
2119		.ctx.actor = ocfs2_empty_dir_filldir,
2120	};
2121
2122	if (ocfs2_dir_indexed(inode)) {
2123		ret = ocfs2_empty_dir_dx(inode, &priv);
2124		if (ret)
2125			mlog_errno(ret);
2126		/*
2127		 * We still run ocfs2_dir_foreach to get the checks
2128		 * for "." and "..".
2129		 */
2130	}
2131
2132	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2133	if (ret)
2134		mlog_errno(ret);
2135
2136	if (!priv.seen_dot || !priv.seen_dot_dot) {
2137		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2138		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2139		/*
2140		 * XXX: Is it really safe to allow an unlink to continue?
2141		 */
2142		return 1;
2143	}
2144
2145	return !priv.seen_other;
2146}
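/*
 * Typical use is from the unlink/rename paths in namei.c, which hold
 * the cluster lock and do something like (illustrative only):
 *
 *	if (!ocfs2_empty_dir(inode))
 *		return -ENOTEMPTY;
 */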
2147
2148/*
2149 * Fills "." and ".." dirents in a new directory block. Returns dirent for
2150 * "..", which might be used during creation of a directory with a dir
2151 * trailer. It is otherwise safe to ignore the return code.
2152 */
2153static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2154							  struct inode *parent,
2155							  char *start,
2156							  unsigned int size)
2157{
2158	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2159
2160	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2161	de->name_len = 1;
2162	de->rec_len =
2163		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2164	strcpy(de->name, ".");
2165	ocfs2_set_de_type(de, S_IFDIR);
2166
2167	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2168	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2169	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2170	de->name_len = 2;
2171	strcpy(de->name, "..");
2172	ocfs2_set_de_type(de, S_IFDIR);
2173
2174	return de;
2175}
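/*
 * Resulting block layout, e.g. for a block with a trailer (offsets
 * illustrative, OCFS2_DIR_REC_LEN(1) assumed to be 16):
 *
 *	[ "." rec_len=16 ][ ".." rec_len=size-16 ........ ][ trailer ]
 *
 * The ".." record absorbs all remaining space up to 'size', which is
 * why callers that want a trailer pass a reduced 'size' and then use
 * the returned dirent to size the free space recorded in the trailer.
 */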
2176
2177/*
2178 * This works together with code in ocfs2_mknod_locked() which sets
2179 * the inline-data flag and initializes the inline-data section.
2180 */
2181static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2182				 handle_t *handle,
2183				 struct inode *parent,
2184				 struct inode *inode,
2185				 struct buffer_head *di_bh)
2186{
2187	int ret;
2188	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2189	struct ocfs2_inline_data *data = &di->id2.i_data;
2190	unsigned int size = le16_to_cpu(data->id_count);
2191
2192	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2193				      OCFS2_JOURNAL_ACCESS_WRITE);
2194	if (ret) {
2195		mlog_errno(ret);
2196		goto out;
2197	}
2198
2199	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2200	ocfs2_journal_dirty(handle, di_bh);
2201
2202	i_size_write(inode, size);
2203	set_nlink(inode, 2);
2204	inode->i_blocks = ocfs2_inode_sector_count(inode);
2205
2206	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2207	if (ret < 0)
2208		mlog_errno(ret);
2209
2210out:
2211	return ret;
2212}
2213
2214static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2215				 handle_t *handle,
2216				 struct inode *parent,
2217				 struct inode *inode,
2218				 struct buffer_head *fe_bh,
2219				 struct ocfs2_alloc_context *data_ac,
2220				 struct buffer_head **ret_new_bh)
2221{
2222	int status;
2223	unsigned int size = osb->sb->s_blocksize;
2224	struct buffer_head *new_bh = NULL;
2225	struct ocfs2_dir_entry *de;
2226
2227	if (ocfs2_new_dir_wants_trailer(inode))
2228		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2229
2230	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2231				     data_ac, NULL, &new_bh);
2232	if (status < 0) {
2233		mlog_errno(status);
2234		goto bail;
2235	}
2236
2237	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2238
2239	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2240					 OCFS2_JOURNAL_ACCESS_CREATE);
2241	if (status < 0) {
2242		mlog_errno(status);
2243		goto bail;
2244	}
2245	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2246
2247	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2248	if (ocfs2_new_dir_wants_trailer(inode)) {
2249		int size = le16_to_cpu(de->rec_len);
2250
2251		/*
2252		 * Figure out the size of the hole left over after
2253		 * insertion of '.' and '..'. The trailer wants this
2254		 * information.
2255		 */
2256		size -= OCFS2_DIR_REC_LEN(2);
2257		size -= sizeof(struct ocfs2_dir_block_trailer);
2258
2259		ocfs2_init_dir_trailer(inode, new_bh, size);
2260	}
2261
2262	ocfs2_journal_dirty(handle, new_bh);
2263
2264	i_size_write(inode, inode->i_sb->s_blocksize);
2265	set_nlink(inode, 2);
2266	inode->i_blocks = ocfs2_inode_sector_count(inode);
2267	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2268	if (status < 0) {
2269		mlog_errno(status);
2270		goto bail;
2271	}
2272
2273	status = 0;
2274	if (ret_new_bh) {
2275		*ret_new_bh = new_bh;
2276		new_bh = NULL;
2277	}
2278bail:
2279	brelse(new_bh);
2280
2281	return status;
2282}
2283
2284static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2285				     handle_t *handle, struct inode *dir,
2286				     struct buffer_head *di_bh,
2287				     struct buffer_head *dirdata_bh,
2288				     struct ocfs2_alloc_context *meta_ac,
2289				     int dx_inline, u32 num_entries,
2290				     struct buffer_head **ret_dx_root_bh)
2291{
2292	int ret;
2293	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2294	u16 dr_suballoc_bit;
2295	u64 suballoc_loc, dr_blkno;
2296	unsigned int num_bits;
2297	struct buffer_head *dx_root_bh = NULL;
2298	struct ocfs2_dx_root_block *dx_root;
2299	struct ocfs2_dir_block_trailer *trailer =
2300		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2301
2302	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2303				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2304	if (ret) {
2305		mlog_errno(ret);
2306		goto out;
2307	}
2308
2309	trace_ocfs2_dx_dir_attach_index(
2310				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2311				(unsigned long long)dr_blkno);
2312
2313	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2314	if (dx_root_bh == NULL) {
2315		ret = -ENOMEM;
2316		goto out;
2317	}
2318	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2319
2320	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2321				      OCFS2_JOURNAL_ACCESS_CREATE);
2322	if (ret < 0) {
2323		mlog_errno(ret);
2324		goto out;
2325	}
2326
2327	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2328	memset(dx_root, 0, osb->sb->s_blocksize);
2329	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2330	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2331	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2332	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2333	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2334	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2335	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2336	dx_root->dr_num_entries = cpu_to_le32(num_entries);
2337	if (le16_to_cpu(trailer->db_free_rec_len))
2338		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2339	else
2340		dx_root->dr_free_blk = cpu_to_le64(0);
2341
2342	if (dx_inline) {
2343		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2344		dx_root->dr_entries.de_count =
2345			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2346	} else {
2347		dx_root->dr_list.l_count =
2348			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2349	}
2350	ocfs2_journal_dirty(handle, dx_root_bh);
2351
2352	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2353				      OCFS2_JOURNAL_ACCESS_CREATE);
2354	if (ret) {
2355		mlog_errno(ret);
2356		goto out;
2357	}
2358
2359	di->i_dx_root = cpu_to_le64(dr_blkno);
2360
2361	spin_lock(&OCFS2_I(dir)->ip_lock);
2362	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2363	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2364	spin_unlock(&OCFS2_I(dir)->ip_lock);
2365
2366	ocfs2_journal_dirty(handle, di_bh);
2367
2368	*ret_dx_root_bh = dx_root_bh;
2369	dx_root_bh = NULL;
2370
2371out:
2372	brelse(dx_root_bh);
2373	return ret;
2374}
2375
2376static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2377				       handle_t *handle, struct inode *dir,
2378				       struct buffer_head **dx_leaves,
2379				       int num_dx_leaves, u64 start_blk)
2380{
2381	int ret, i;
2382	struct ocfs2_dx_leaf *dx_leaf;
2383	struct buffer_head *bh;
2384
2385	for (i = 0; i < num_dx_leaves; i++) {
2386		bh = sb_getblk(osb->sb, start_blk + i);
2387		if (bh == NULL) {
2388			ret = -ENOMEM;
2389			goto out;
2390		}
2391		dx_leaves[i] = bh;
2392
2393		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2394
2395		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2396					      OCFS2_JOURNAL_ACCESS_CREATE);
2397		if (ret < 0) {
2398			mlog_errno(ret);
2399			goto out;
2400		}
2401
2402		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2403
2404		memset(dx_leaf, 0, osb->sb->s_blocksize);
2405		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2406		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2407		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2408		dx_leaf->dl_list.de_count =
2409			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2410
2411		trace_ocfs2_dx_dir_format_cluster(
2412				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2413				(unsigned long long)bh->b_blocknr,
2414				le16_to_cpu(dx_leaf->dl_list.de_count));
2415
2416		ocfs2_journal_dirty(handle, bh);
2417	}
2418
2419	ret = 0;
2420out:
2421	return ret;
2422}
2423
2424/*
2425 * Allocates and formats a new cluster for use in an indexed dir
2426 * leaf. This version will not do the extent insert, so that it can be
2427 * used by operations which need careful ordering.
2428 */
2429static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2430				      u32 cpos, handle_t *handle,
2431				      struct ocfs2_alloc_context *data_ac,
2432				      struct buffer_head **dx_leaves,
2433				      int num_dx_leaves, u64 *ret_phys_blkno)
2434{
2435	int ret;
2436	u32 phys, num;
2437	u64 phys_blkno;
2438	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2439
2440	/*
2441	 * XXX: For create, this should claim cluster for the index
2442	 * *before* the unindexed insert so that we have a better
2443	 * chance of contiguousness as the directory grows in number
2444	 * of entries.
2445	 */
2446	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2447	if (ret) {
2448		mlog_errno(ret);
2449		goto out;
2450	}
2451
2452	/*
2453	 * Format the new cluster first. That way, we're inserting
2454	 * valid data.
2455	 */
2456	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2457	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2458					  num_dx_leaves, phys_blkno);
2459	if (ret) {
2460		mlog_errno(ret);
2461		goto out;
2462	}
2463
2464	*ret_phys_blkno = phys_blkno;
2465out:
2466	return ret;
2467}
2468
2469static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2470				    struct ocfs2_extent_tree *et,
2471				    u32 cpos, handle_t *handle,
2472				    struct ocfs2_alloc_context *data_ac,
2473				    struct ocfs2_alloc_context *meta_ac,
2474				    struct buffer_head **dx_leaves,
2475				    int num_dx_leaves)
2476{
2477	int ret;
2478	u64 phys_blkno;
2479
2480	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2481					 num_dx_leaves, &phys_blkno);
2482	if (ret) {
2483		mlog_errno(ret);
2484		goto out;
2485	}
2486
2487	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2488				  meta_ac);
2489	if (ret)
2490		mlog_errno(ret);
2491out:
2492	return ret;
2493}
2494
2495static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2496							int *ret_num_leaves)
2497{
2498	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2499	struct buffer_head **dx_leaves;
2500
2501	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2502			    GFP_NOFS);
2503	if (dx_leaves && ret_num_leaves)
2504		*ret_num_leaves = num_dx_leaves;
2505
2506	return dx_leaves;
2507}
2508
2509static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2510				 handle_t *handle,
2511				 struct inode *parent,
2512				 struct inode *inode,
2513				 struct buffer_head *di_bh,
2514				 struct ocfs2_alloc_context *data_ac,
2515				 struct ocfs2_alloc_context *meta_ac)
2516{
2517	int ret;
2518	struct buffer_head *leaf_bh = NULL;
2519	struct buffer_head *dx_root_bh = NULL;
2520	struct ocfs2_dx_hinfo hinfo;
2521	struct ocfs2_dx_root_block *dx_root;
2522	struct ocfs2_dx_entry_list *entry_list;
2523
2524	/*
2525	 * Our strategy is to create the directory as though it were
2526	 * unindexed, then add the index block. This works with very
2527	 * little complication since the state of a new directory is a
2528	 * very well known quantity.
2529	 *
2530	 * Essentially, we have two dirents ("." and "..") in the 1st
2531	 * block which need indexing. These are easily inserted into
2532	 * the index block.
2533	 */
2534
2535	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2536				    data_ac, &leaf_bh);
2537	if (ret) {
2538		mlog_errno(ret);
2539		goto out;
2540	}
2541
2542	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2543					meta_ac, 1, 2, &dx_root_bh);
2544	if (ret) {
2545		mlog_errno(ret);
2546		goto out;
2547	}
2548	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2549	entry_list = &dx_root->dr_entries;
2550
2551	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2552	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2553	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2554
2555	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2556	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2557
2558out:
2559	brelse(dx_root_bh);
2560	brelse(leaf_bh);
2561	return ret;
2562}
2563
2564int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2565		       handle_t *handle,
2566		       struct inode *parent,
2567		       struct inode *inode,
2568		       struct buffer_head *fe_bh,
2569		       struct ocfs2_alloc_context *data_ac,
2570		       struct ocfs2_alloc_context *meta_ac)
2572{
2573	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2574
2575	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2576		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2577
2578	if (ocfs2_supports_indexed_dirs(osb))
2579		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2580					     data_ac, meta_ac);
2581
2582	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2583				     data_ac, NULL);
2584}
2585
2586static int ocfs2_dx_dir_index_block(struct inode *dir,
2587				    handle_t *handle,
2588				    struct buffer_head **dx_leaves,
2589				    int num_dx_leaves,
2590				    u32 *num_dx_entries,
2591				    struct buffer_head *dirent_bh)
2592{
2593	int ret = 0, namelen, i;
2594	char *de_buf, *limit;
2595	struct ocfs2_dir_entry *de;
2596	struct buffer_head *dx_leaf_bh;
2597	struct ocfs2_dx_hinfo hinfo;
2598	u64 dirent_blk = dirent_bh->b_blocknr;
2599
2600	de_buf = dirent_bh->b_data;
2601	limit = de_buf + dir->i_sb->s_blocksize;
2602
2603	while (de_buf < limit) {
2604		de = (struct ocfs2_dir_entry *)de_buf;
2605
2606		namelen = de->name_len;
2607		if (!namelen || !de->inode)
2608			goto inc;
2609
2610		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2611
2612		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2613		dx_leaf_bh = dx_leaves[i];
2614
2615		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2616						 dirent_blk, dx_leaf_bh);
2617		if (ret) {
2618			mlog_errno(ret);
2619			goto out;
2620		}
2621
2622		*num_dx_entries = *num_dx_entries + 1;
2623
2624inc:
2625		de_buf += le16_to_cpu(de->rec_len);
2626	}
2627
2628out:
2629	return ret;
2630}
2631
2632/*
2633 * XXX: This expects dx_root_bh to already be part of the transaction.
2634 */
2635static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2636					 struct buffer_head *dx_root_bh,
2637					 struct buffer_head *dirent_bh)
2638{
2639	char *de_buf, *limit;
2640	struct ocfs2_dx_root_block *dx_root;
2641	struct ocfs2_dir_entry *de;
2642	struct ocfs2_dx_hinfo hinfo;
2643	u64 dirent_blk = dirent_bh->b_blocknr;
2644
2645	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2646
2647	de_buf = dirent_bh->b_data;
2648	limit = de_buf + dir->i_sb->s_blocksize;
2649
2650	while (de_buf < limit) {
2651		de = (struct ocfs2_dir_entry *)de_buf;
2652
2653		if (!de->name_len || !de->inode)
2654			goto inc;
2655
2656		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2657
2658		trace_ocfs2_dx_dir_index_root_block(
2659				(unsigned long long)dir->i_ino,
2660				hinfo.major_hash, hinfo.minor_hash,
2661				de->name_len, de->name,
2662				le16_to_cpu(dx_root->dr_entries.de_num_used));
2663
2664		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2665					   dirent_blk);
2666
2667		le32_add_cpu(&dx_root->dr_num_entries, 1);
2668inc:
2669		de_buf += le16_to_cpu(de->rec_len);
2670	}
2671}
2672
2673/*
2674 * Count the number of inline directory entries in di_bh and compare
2675 * them against the number of entries we can hold in an inline dx root
2676 * block.
2677 */
2678static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2679					 struct buffer_head *di_bh)
2680{
2681	int dirent_count = 0;
2682	char *de_buf, *limit;
2683	struct ocfs2_dir_entry *de;
2684	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2685
2686	de_buf = di->id2.i_data.id_data;
2687	limit = de_buf + i_size_read(dir);
2688
2689	while (de_buf < limit) {
2690		de = (struct ocfs2_dir_entry *)de_buf;
2691
2692		if (de->name_len && de->inode)
2693			dirent_count++;
2694
2695		de_buf += le16_to_cpu(de->rec_len);
2696	}
2697
2698	/* We are careful to leave room for one extra record. */
2699	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2700}
2701
2702/*
2703 * Expand rec_len of the rightmost dirent in a directory block so that it
2704 * contains the end of our valid space for dirents. We do this during
2705 * expansion from an inline directory to one with extents. The first dir block
2706 * in that case is taken from the inline data portion of the inode block.
2707 *
2708 * This will also return the largest amount of contiguous space for a dirent
2709 * in the block. That value is *not* necessarily the last dirent, even after
2710 * expansion. The directory indexing code wants this value for free space
2711 * accounting. We do this here since we're already walking the entire dir
2712 * block.
2713 *
2714 * We add the dir trailer if this filesystem wants it.
2715 */
2716static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2717					     struct inode *dir)
2718{
2719	struct super_block *sb = dir->i_sb;
2720	struct ocfs2_dir_entry *de;
2721	struct ocfs2_dir_entry *prev_de;
2722	char *de_buf, *limit;
2723	unsigned int new_size = sb->s_blocksize;
2724	unsigned int bytes, this_hole;
2725	unsigned int largest_hole = 0;
2726
2727	if (ocfs2_new_dir_wants_trailer(dir))
2728		new_size = ocfs2_dir_trailer_blk_off(sb);
2729
2730	bytes = new_size - old_size;
2731
2732	limit = start + old_size;
2733	de_buf = start;
2734	de = (struct ocfs2_dir_entry *)de_buf;
2735	do {
2736		this_hole = ocfs2_figure_dirent_hole(de);
2737		if (this_hole > largest_hole)
2738			largest_hole = this_hole;
2739
2740		prev_de = de;
2741		de_buf += le16_to_cpu(de->rec_len);
2742		de = (struct ocfs2_dir_entry *)de_buf;
2743	} while (de_buf < limit);
2744
2745	le16_add_cpu(&prev_de->rec_len, bytes);
2746
2747	/* We need to double check this after modification of the final
2748	 * dirent. */
2749	this_hole = ocfs2_figure_dirent_hole(prev_de);
2750	if (this_hole > largest_hole)
2751		largest_hole = this_hole;
2752
2753	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2754		return largest_hole;
2755	return 0;
2756}
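/*
 * Example of the expansion above when an inline directory grows into
 * a full block (sizes illustrative): with old_size == 200 and a 4k
 * block without a trailer, the last dirent's rec_len grows by
 * 4096 - 200 == 3896 bytes, so the new block is still covered
 * end-to-end by valid records. The returned largest hole feeds the
 * indexed directory's free space accounting.
 */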
2757
2758/*
2759 * We allocate enough clusters to fulfill "blocks_wanted", but set
2760 * i_size to exactly one block. ocfs2_extend_dir() will handle the
2761 * rest automatically for us.
2762 *
2763 * *first_block_bh is a pointer to the 1st data block allocated to the
2764 *  directory.
2765 */
2766static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2767				   unsigned int blocks_wanted,
2768				   struct ocfs2_dir_lookup_result *lookup,
2769				   struct buffer_head **first_block_bh)
2770{
2771	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2772	struct super_block *sb = dir->i_sb;
2773	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2774		credits = ocfs2_inline_to_extents_credits(sb);
2775	u64 dx_insert_blkno, blkno,
2776		bytes = blocks_wanted << sb->s_blocksize_bits;
2777	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2778	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2779	struct ocfs2_alloc_context *data_ac = NULL;
2780	struct ocfs2_alloc_context *meta_ac = NULL;
2781	struct buffer_head *dirdata_bh = NULL;
2782	struct buffer_head *dx_root_bh = NULL;
2783	struct buffer_head **dx_leaves = NULL;
2784	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2785	handle_t *handle;
2786	struct ocfs2_extent_tree et;
2787	struct ocfs2_extent_tree dx_et;
2788	int did_quota = 0, bytes_allocated = 0;
2789
2790	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2791
2792	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2793	dx_alloc = 0;
2794
2795	down_write(&oi->ip_alloc_sem);
2796
2797	if (ocfs2_supports_indexed_dirs(osb)) {
2798		credits += ocfs2_add_dir_index_credits(sb);
2799
2800		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2801		if (!dx_inline) {
2802			/* Add one more cluster for an index leaf */
2803			dx_alloc++;
2804			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2805								&num_dx_leaves);
2806			if (!dx_leaves) {
2807				ret = -ENOMEM;
2808				mlog_errno(ret);
2809				goto out;
2810			}
2811		}
2812
2813		/* This gets us the dx_root */
2814		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2815		if (ret) {
2816			mlog_errno(ret);
2817			goto out;
2818		}
2819	}
2820
2821	/*
2822	 * We should never need more than 2 clusters for the unindexed
2823	 * tree - maximum dirent size is far less than one block. In
2824	 * fact, the only time we'd need more than one cluster is if
2825	 * blocksize == clustersize and the dirent won't fit in the
2826	 * extra space that the expansion to a single block gives. As
2827	 * of today, that only happens on 4k/4k file systems.
2828	 */
2829	BUG_ON(alloc > 2);
2830
2831	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2832	if (ret) {
2833		mlog_errno(ret);
2834		goto out;
2835	}
2836
2837	/*
2838	 * Prepare for worst case allocation scenario of two separate
2839	 * extents in the unindexed tree.
2840	 */
2841	if (alloc == 2)
2842		credits += OCFS2_SUBALLOC_ALLOC;
2843
2844	handle = ocfs2_start_trans(osb, credits);
2845	if (IS_ERR(handle)) {
2846		ret = PTR_ERR(handle);
2847		mlog_errno(ret);
2848		goto out;
2849	}
2850
2851	ret = dquot_alloc_space_nodirty(dir,
2852		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2853	if (ret)
2854		goto out_commit;
2855	did_quota = 1;
2856
2857	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2858		/*
2859		 * Allocate our index cluster first, to maximize the
2860		 * possibility that unindexed leaves grow
2861		 * contiguously.
2862		 */
2863		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2864						 dx_leaves, num_dx_leaves,
2865						 &dx_insert_blkno);
2866		if (ret) {
2867			mlog_errno(ret);
2868			goto out_commit;
2869		}
2870		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2871	}
2872
2873	 * Try to claim as many clusters as the bitmap can give, though
2874	 * Try to claim as many clusters as the bitmap can give though
2875	 * if we only get one now, that's enough to continue. The rest
2876	 * will be claimed after the conversion to extents.
2877	 */
2878	if (ocfs2_dir_resv_allowed(osb))
2879		data_ac->ac_resv = &oi->ip_la_data_resv;
2880	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2881	if (ret) {
2882		mlog_errno(ret);
2883		goto out_commit;
2884	}
2885	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2886
2887	/*
2888	 * Operations are carefully ordered so that we set up the new
2889	 * data block first. The conversion from inline data to
2890	 * extents follows.
2891	 */
2892	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2893	dirdata_bh = sb_getblk(sb, blkno);
2894	if (!dirdata_bh) {
2895		ret = -ENOMEM;
2896		mlog_errno(ret);
2897		goto out_commit;
2898	}
2899
2900	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2901
2902	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2903				      OCFS2_JOURNAL_ACCESS_CREATE);
2904	if (ret) {
2905		mlog_errno(ret);
2906		goto out_commit;
2907	}
2908
2909	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2910	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2911	       sb->s_blocksize - i_size_read(dir));
2912	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2913	if (ocfs2_new_dir_wants_trailer(dir)) {
2914		/*
2915		 * Prepare the dir trailer up front. It will otherwise look
2916		 * like a valid dirent. Even if inserting the index fails
2917		 * (unlikely), all we'll have done is give the first dir
2918		 * block a small amount of fragmentation.
2919		 */
2920		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2921	}
2922
2923	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2924	ocfs2_journal_dirty(handle, dirdata_bh);
2925
2926	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2927		/*
2928		 * Dx dirs with an external cluster need to do this up
2929		 * front. Inline dx roots get handled later, after
2930		 * we've allocated our root block. We get passed back
2931		 * a total number of items so that dr_num_entries can
2932		 * be correctly set once the dx_root has been
2933		 * allocated.
2934		 */
2935		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2936					       num_dx_leaves, &num_dx_entries,
2937					       dirdata_bh);
2938		if (ret) {
2939			mlog_errno(ret);
2940			goto out_commit;
2941		}
2942	}
2943
2944	/*
2945	 * Set extent, i_size, etc on the directory. After this, the
2946	 * inode should contain the same exact dirents as before and
2947	 * be fully accessible from system calls.
2948	 *
2949	 * We let the later dirent insert modify c/mtime - to the user
2950	 * the data hasn't changed.
2951	 */
2952	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2953				      OCFS2_JOURNAL_ACCESS_CREATE);
2954	if (ret) {
2955		mlog_errno(ret);
2956		goto out_commit;
2957	}
2958
2959	spin_lock(&oi->ip_lock);
2960	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2961	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2962	spin_unlock(&oi->ip_lock);
2963
2964	ocfs2_dinode_new_extent_list(dir, di);
2965
2966	i_size_write(dir, sb->s_blocksize);
2967	dir->i_mtime = dir->i_ctime = current_time(dir);
2968
2969	di->i_size = cpu_to_le64(sb->s_blocksize);
2970	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
2971	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
2972	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2973
2974	/*
2975	 * This should never fail as our extent list is empty and all
2976	 * related blocks have been journaled already.
2977	 */
2978	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
2979				  0, NULL);
2980	if (ret) {
2981		mlog_errno(ret);
2982		goto out_commit;
2983	}
2984
2985	/*
2986	 * Set i_blocks after the extent insert for the most up to
2987	 * date ip_clusters value.
2988	 */
2989	dir->i_blocks = ocfs2_inode_sector_count(dir);
2990
2991	ocfs2_journal_dirty(handle, di_bh);
2992
2993	if (ocfs2_supports_indexed_dirs(osb)) {
2994		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
2995						dirdata_bh, meta_ac, dx_inline,
2996						num_dx_entries, &dx_root_bh);
2997		if (ret) {
2998			mlog_errno(ret);
2999			goto out_commit;
3000		}
3001
3002		if (dx_inline) {
3003			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3004						      dirdata_bh);
3005		} else {
3006			ocfs2_init_dx_root_extent_tree(&dx_et,
3007						       INODE_CACHE(dir),
3008						       dx_root_bh);
3009			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3010						  dx_insert_blkno, 1, 0, NULL);
3011			if (ret)
3012				mlog_errno(ret);
3013		}
3014	}
3015
3016	/*
3017	 * If we asked for two clusters but only got one in the 1st
3018	 * pass, claim the 2nd cluster here as a separate extent.
3019	 */
3020	if (alloc > len) {
3021		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3022					   &len);
3023		if (ret) {
3024			mlog_errno(ret);
3025			goto out_commit;
3026		}
3027		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3028
3029		ret = ocfs2_insert_extent(handle, &et, 1,
3030					  blkno, len, 0, NULL);
3031		if (ret) {
3032			mlog_errno(ret);
3033			goto out_commit;
3034		}
3035		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3036	}
3037
3038	*first_block_bh = dirdata_bh;
3039	dirdata_bh = NULL;
3040	if (ocfs2_supports_indexed_dirs(osb)) {
3041		unsigned int off;
3042
3043		if (!dx_inline) {
3044			/*
3045			 * We need to return the correct block within the
3046			 * cluster which should hold our entry.
3047			 */
3048			off = ocfs2_dx_dir_hash_idx(osb,
3049						    &lookup->dl_hinfo);
3050			get_bh(dx_leaves[off]);
3051			lookup->dl_dx_leaf_bh = dx_leaves[off];
3052		}
3053		lookup->dl_dx_root_bh = dx_root_bh;
3054		dx_root_bh = NULL;
3055	}
3056
3057out_commit:
3058	if (ret < 0 && did_quota)
3059		dquot_free_space_nodirty(dir, bytes_allocated);
3060
3061	ocfs2_commit_trans(osb, handle);
3062
3063out:
3064	up_write(&oi->ip_alloc_sem);
3065	if (data_ac)
3066		ocfs2_free_alloc_context(data_ac);
3067	if (meta_ac)
3068		ocfs2_free_alloc_context(meta_ac);
3069
3070	if (dx_leaves) {
3071		for (i = 0; i < num_dx_leaves; i++)
3072			brelse(dx_leaves[i]);
3073		kfree(dx_leaves);
3074	}
3075
3076	brelse(dirdata_bh);
3077	brelse(dx_root_bh);
3078
3079	return ret;
3080}
3081
3082/* returns a bh of the 1st new block in the allocation. */
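/*
 * Illustration (block and cluster sizes are assumptions, not read from
 * any superblock): with 4K blocks and 128K clusters, a directory whose
 * i_size already equals ocfs2_clusters_to_bytes(sb, ip_clusters) has no
 * slack left in its last cluster, so one new cluster must be claimed
 * (and charged to quota) before the next block can be mapped. Otherwise
 * the block at v_blkno is already backed by allocated space and only
 * the sb_getblk() at the end is needed.
 */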
3083static int ocfs2_do_extend_dir(struct super_block *sb,
3084			       handle_t *handle,
3085			       struct inode *dir,
3086			       struct buffer_head *parent_fe_bh,
3087			       struct ocfs2_alloc_context *data_ac,
3088			       struct ocfs2_alloc_context *meta_ac,
3089			       struct buffer_head **new_bh)
3090{
3091	int status;
3092	int extend, did_quota = 0;
3093	u64 p_blkno, v_blkno;
3094
3095	spin_lock(&OCFS2_I(dir)->ip_lock);
3096	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3097	spin_unlock(&OCFS2_I(dir)->ip_lock);
3098
3099	if (extend) {
3100		u32 offset = OCFS2_I(dir)->ip_clusters;
3101
3102		status = dquot_alloc_space_nodirty(dir,
3103					ocfs2_clusters_to_bytes(sb, 1));
3104		if (status)
3105			goto bail;
3106		did_quota = 1;
3107
3108		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3109					      1, 0, parent_fe_bh, handle,
3110					      data_ac, meta_ac, NULL);
3111		BUG_ON(status == -EAGAIN);
3112		if (status < 0) {
3113			mlog_errno(status);
3114			goto bail;
3115		}
3116	}
3117
3118	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3119	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3120	if (status < 0) {
3121		mlog_errno(status);
3122		goto bail;
3123	}
3124
3125	*new_bh = sb_getblk(sb, p_blkno);
3126	if (!*new_bh) {
3127		status = -ENOMEM;
3128		mlog_errno(status);
3129		goto bail;
3130	}
3131	status = 0;
3132bail:
3133	if (did_quota && status < 0)
3134		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3135	return status;
3136}
3137
3138/*
3139 * Assumes you already have a cluster lock on the directory.
3140 *
3141 * 'blocks_wanted' is only used if we have an inline directory which
3142 * is to be turned into an extent based one. The size of the dirent to
3143 * insert might be larger than the space gained by growing to just one
3144 * block, so we may have to grow the inode by two blocks in that case.
3145 *
3146 * If the directory is already indexed, dx_root_bh must be provided.
3147 */
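/*
 * Illustration: for an inline directory that is nearly full, the extra
 * room gained by pushing its data out to one block - (block size minus
 * any trailer) minus the current i_size, plus whatever slack the last
 * dirent can give up - may still be smaller than the record being
 * inserted. ocfs2_find_dir_space_id() reports that case as
 * blocks_wanted == 2, and the second block is then formatted in the
 * do_extend path below.
 */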
3148static int ocfs2_extend_dir(struct ocfs2_super *osb,
3149			    struct inode *dir,
3150			    struct buffer_head *parent_fe_bh,
3151			    unsigned int blocks_wanted,
3152			    struct ocfs2_dir_lookup_result *lookup,
3153			    struct buffer_head **new_de_bh)
3154{
3155	int status = 0;
3156	int credits, num_free_extents, drop_alloc_sem = 0;
3157	loff_t dir_i_size;
3158	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3159	struct ocfs2_extent_list *el = &fe->id2.i_list;
3160	struct ocfs2_alloc_context *data_ac = NULL;
3161	struct ocfs2_alloc_context *meta_ac = NULL;
3162	handle_t *handle = NULL;
3163	struct buffer_head *new_bh = NULL;
3164	struct ocfs2_dir_entry * de;
3165	struct super_block *sb = osb->sb;
3166	struct ocfs2_extent_tree et;
3167	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3168
3169	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3170		/*
3171		 * This would be a code error as an inline directory should
3172		 * never have an index root.
3173		 */
3174		BUG_ON(dx_root_bh);
3175
3176		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3177						 blocks_wanted, lookup,
3178						 &new_bh);
3179		if (status) {
3180			mlog_errno(status);
3181			goto bail;
3182		}
3183
3184		/* Expansion from inline to an indexed directory will
3185		 * have given us this. */
3186		dx_root_bh = lookup->dl_dx_root_bh;
3187
3188		if (blocks_wanted == 1) {
3189			/*
3190			 * If the new dirent will fit inside the space
3191			 * created by pushing out to one block, then
3192			 * we can complete the operation
3193			 * here. Otherwise we have to expand i_size
3194			 * and format the 2nd block below.
3195			 */
3196			BUG_ON(new_bh == NULL);
3197			goto bail_bh;
3198		}
3199
3200		/*
3201		 * Get rid of 'new_bh' - we want to format the 2nd
3202		 * data block and return that instead.
3203		 */
3204		brelse(new_bh);
3205		new_bh = NULL;
3206
3207		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3208		drop_alloc_sem = 1;
3209		dir_i_size = i_size_read(dir);
3210		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3211		goto do_extend;
3212	}
3213
3214	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3215	drop_alloc_sem = 1;
3216	dir_i_size = i_size_read(dir);
3217	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3218			       dir_i_size);
3219
3220	/* dir->i_size is always block aligned. */
3221	spin_lock(&OCFS2_I(dir)->ip_lock);
3222	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3223		spin_unlock(&OCFS2_I(dir)->ip_lock);
3224		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3225					      parent_fe_bh);
3226		num_free_extents = ocfs2_num_free_extents(&et);
3227		if (num_free_extents < 0) {
3228			status = num_free_extents;
3229			mlog_errno(status);
3230			goto bail;
3231		}
3232
3233		if (!num_free_extents) {
3234			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3235			if (status < 0) {
3236				if (status != -ENOSPC)
3237					mlog_errno(status);
3238				goto bail;
3239			}
3240		}
3241
3242		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3243		if (status < 0) {
3244			if (status != -ENOSPC)
3245				mlog_errno(status);
3246			goto bail;
3247		}
3248
3249		if (ocfs2_dir_resv_allowed(osb))
3250			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3251
3252		credits = ocfs2_calc_extend_credits(sb, el);
3253	} else {
3254		spin_unlock(&OCFS2_I(dir)->ip_lock);
3255		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3256	}
3257
3258do_extend:
3259	if (ocfs2_dir_indexed(dir))
3260		credits++; /* For attaching the new dirent block to the
3261			    * dx_root */
3262
3263	handle = ocfs2_start_trans(osb, credits);
3264	if (IS_ERR(handle)) {
3265		status = PTR_ERR(handle);
3266		handle = NULL;
3267		mlog_errno(status);
3268		goto bail;
3269	}
3270
3271	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3272				     data_ac, meta_ac, &new_bh);
3273	if (status < 0) {
3274		mlog_errno(status);
3275		goto bail;
3276	}
3277
3278	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3279
3280	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3281					 OCFS2_JOURNAL_ACCESS_CREATE);
3282	if (status < 0) {
3283		mlog_errno(status);
3284		goto bail;
3285	}
3286	memset(new_bh->b_data, 0, sb->s_blocksize);
3287
3288	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3289	de->inode = 0;
3290	if (ocfs2_supports_dir_trailer(dir)) {
3291		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3292
3293		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3294
3295		if (ocfs2_dir_indexed(dir)) {
3296			status = ocfs2_dx_dir_link_trailer(dir, handle,
3297							   dx_root_bh, new_bh);
3298			if (status) {
3299				mlog_errno(status);
3300				goto bail;
3301			}
3302		}
3303	} else {
3304		de->rec_len = cpu_to_le16(sb->s_blocksize);
3305	}
3306	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3307	ocfs2_journal_dirty(handle, new_bh);
3308
3309	dir_i_size += dir->i_sb->s_blocksize;
3310	i_size_write(dir, dir_i_size);
3311	dir->i_blocks = ocfs2_inode_sector_count(dir);
3312	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3313	if (status < 0) {
3314		mlog_errno(status);
3315		goto bail;
3316	}
3317
3318bail_bh:
3319	*new_de_bh = new_bh;
3320	get_bh(*new_de_bh);
3321bail:
3322	if (handle)
3323		ocfs2_commit_trans(osb, handle);
3324	if (drop_alloc_sem)
3325		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3326
3327	if (data_ac)
3328		ocfs2_free_alloc_context(data_ac);
3329	if (meta_ac)
3330		ocfs2_free_alloc_context(meta_ac);
3331
3332	brelse(new_bh);
3333
3334	return status;
3335}
3336
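/*
 * Scan an inline directory's dirents for either an existing entry with
 * this name (-EEXIST) or a slot with enough room for it. If neither is
 * found, return -ENOSPC and tell the caller via *blocks_wanted how many
 * blocks the expansion will need.
 */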
3337static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3338				   const char *name, int namelen,
3339				   struct buffer_head **ret_de_bh,
3340				   unsigned int *blocks_wanted)
3341{
3342	int ret;
3343	struct super_block *sb = dir->i_sb;
3344	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3345	struct ocfs2_dir_entry *de, *last_de = NULL;
3346	char *de_buf, *limit;
3347	unsigned long offset = 0;
3348	unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize;
3349
3350	/*
3351	 * This calculates how many free bytes we'd have in block zero, should
3352	 * this function force expansion to an extent tree.
3353	 */
3354	if (ocfs2_new_dir_wants_trailer(dir))
3355		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3356	else
3357		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3358
3359	de_buf = di->id2.i_data.id_data;
3360	limit = de_buf + i_size_read(dir);
3361	rec_len = OCFS2_DIR_REC_LEN(namelen);
3362
3363	while (de_buf < limit) {
3364		de = (struct ocfs2_dir_entry *)de_buf;
3365
3366		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
3367			ret = -ENOENT;
3368			goto out;
3369		}
3370		if (ocfs2_match(namelen, name, de)) {
3371			ret = -EEXIST;
3372			goto out;
3373		}
3374		/*
3375		 * No need to check for a trailing dirent record here as
3376		 * they're not used for inline dirs.
3377		 */
3378
3379		if (ocfs2_dirent_would_fit(de, rec_len)) {
3380			/* Ok, we found a spot. Return this bh and let
3381			 * the caller actually fill it in. */
3382			*ret_de_bh = di_bh;
3383			get_bh(*ret_de_bh);
3384			ret = 0;
3385			goto out;
3386		}
3387
3388		last_de = de;
3389		de_buf += le16_to_cpu(de->rec_len);
3390		offset += le16_to_cpu(de->rec_len);
3391	}
3392
3393	/*
3394	 * We're going to require expansion of the directory - figure
3395	 * out how many blocks we'll need so that a place for the
3396	 * dirent can be found.
3397	 */
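	/*
	 * The last dirent can be shrunk to OCFS2_DIR_REC_LEN(name_len)
	 * once it has been copied out to block zero, so everything past
	 * that minimum, plus the free_space computed above, is available
	 * to the new entry. Only if that still falls short do we ask for
	 * a second block.
	 */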
3398	*blocks_wanted = 1;
3399	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
3400	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3401		*blocks_wanted = 2;
3402
3403	ret = -ENOSPC;
3404out:
3405	return ret;
3406}
3407
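/*
 * The same search for an extent-based, unindexed directory: read each
 * block in turn, looking for a duplicate name or a dirent with enough
 * slack, and skipping over the trailer slot where one exists. -ENOSPC
 * means the caller has to extend the directory.
 */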
3408static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3409				   int namelen, struct buffer_head **ret_de_bh)
3410{
3411	unsigned long offset;
3412	struct buffer_head *bh = NULL;
3413	unsigned short rec_len;
3414	struct ocfs2_dir_entry *de;
3415	struct super_block *sb = dir->i_sb;
3416	int status;
3417	int blocksize = dir->i_sb->s_blocksize;
3418
3419	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3420	if (status)
3421		goto bail;
3422
3423	rec_len = OCFS2_DIR_REC_LEN(namelen);
3424	offset = 0;
3425	de = (struct ocfs2_dir_entry *) bh->b_data;
3426	while (1) {
3427		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3428			brelse(bh);
3429			bh = NULL;
3430
3431			if (i_size_read(dir) <= offset) {
3432				/*
3433				 * Caller will have to expand this
3434				 * directory.
3435				 */
3436				status = -ENOSPC;
3437				goto bail;
3438			}
3439			status = ocfs2_read_dir_block(dir,
3440					     offset >> sb->s_blocksize_bits,
3441					     &bh, 0);
3442			if (status)
3443				goto bail;
3444
3445			/* move to next block */
3446			de = (struct ocfs2_dir_entry *) bh->b_data;
3447		}
3448		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
3449			status = -ENOENT;
3450			goto bail;
3451		}
3452		if (ocfs2_match(namelen, name, de)) {
3453			status = -EEXIST;
3454			goto bail;
3455		}
3456
3457		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3458					   blocksize))
3459			goto next;
3460
3461		if (ocfs2_dirent_would_fit(de, rec_len)) {
3462			/* Ok, we found a spot. Return this bh and let
3463			 * the caller actually fill it in. */
3464			*ret_de_bh = bh;
3465			get_bh(*ret_de_bh);
3466			status = 0;
3467			goto bail;
3468		}
3469next:
3470		offset += le16_to_cpu(de->rec_len);
3471		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3472	}
3473
3474bail:
3475	brelse(bh);
3476	if (status)
3477		mlog_errno(status);
3478
3479	return status;
3480}
3481
3482static int dx_leaf_sort_cmp(const void *a, const void *b)
3483{
3484	const struct ocfs2_dx_entry *entry1 = a;
3485	const struct ocfs2_dx_entry *entry2 = b;
3486	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3487	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3488	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3489	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3490
3491	if (major_hash1 > major_hash2)
3492		return 1;
3493	if (major_hash1 < major_hash2)
3494		return -1;
3495
3496	/*
3497	 * It is not strictly necessary to sort by minor hash.
3498	 */
3499	if (minor_hash1 > minor_hash2)
3500		return 1;
3501	if (minor_hash1 < minor_hash2)
3502		return -1;
3503	return 0;
3504}
3505
3506static void dx_leaf_sort_swap(void *a, void *b, int size)
3507{
3508	struct ocfs2_dx_entry *entry1 = a;
3509	struct ocfs2_dx_entry *entry2 = b;
3510
3511	BUG_ON(size != sizeof(*entry1));
3512
3513	swap(*entry1, *entry2);
3514}
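/*
 * These two helpers are handed to the generic sort() in
 * ocfs2_dx_dir_rebalance() below, where a full leaf is sorted in place
 * before being split:
 *
 *	sort(dx_leaf->dl_list.de_entries, num_used,
 *	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
 *	     dx_leaf_sort_swap);
 *
 * The resulting (major, minor) hash order lets a split point be chosen
 * by scanning forward from the median entry.
 */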
3515
3516static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3517{
3518	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3519	int i, num = le16_to_cpu(dl_list->de_num_used);
3520
3521	for (i = 0; i < (num - 1); i++) {
3522		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3523		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3524			return 0;
3525	}
3526
3527	return 1;
3528}
3529
3530/*
3531 * Find the optimal value to split this leaf on. This expects the leaf
3532 * entries to be in sorted order.
3533 *
3534 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3535 * the hash we want to insert.
3536 *
3537 * This function is only concerned with the major hash - that which
3538 * determines which cluster an item belongs to.
3539 */
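/*
 * Worked example (hash values are invented): a leaf at cpos 5 holding
 * sorted major hashes { 5, 5, 7, 9, 9, 12 } is not all-same, so the
 * scan starts at the median (index 3, value 9), which already exceeds
 * leaf_cpos, and split_hash becomes 9. Entries hashed 9 and above will
 * move to the new cluster during the transfer below.
 */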
3540static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3541					u32 leaf_cpos, u32 insert_hash,
3542					u32 *split_hash)
3543{
3544	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3545	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3546	int allsame;
3547
3548	/*
3549	 * There are a couple of rare, but nasty corner cases we have to
3550	 * check for here. All of them involve a leaf where all values
3551	 * have the same hash, which is what we look for first.
3552	 *
3553	 * Most of the time, all of the above is false, and we simply
3554	 * pick the median value for a split.
3555	 */
3556	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3557	if (allsame) {
3558		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3559
3560		if (val == insert_hash) {
3561			/*
3562			 * No matter where we would choose to split,
3563			 * the new entry would want to occupy the same
3564			 * block as these. Since there's no space left
3565			 * in their existing block, we know there
3566			 * won't be space after the split.
3567			 */
3568			return -ENOSPC;
3569		}
3570
3571		if (val == leaf_cpos) {
3572			/*
3573			 * Because val is the same as leaf_cpos (which
3574			 * yet is not equal to insert_hash, we
3575			 * yet is not equal to insert_hash, then we
3576			 * know that insert_hash *must* be larger than
3577			 * val (and leaf_cpos). At least cpos+1 in value.
3578			 *
3579			 * We also know then, that there cannot be an
3580			 * adjacent extent (otherwise we'd be looking
3581			 * at it). Choosing this value gives us a
3582			 * chance to get some contiguousness.
3583			 */
3584			*split_hash = leaf_cpos + 1;
3585			return 0;
3586		}
3587
3588		if (val > insert_hash) {
3589			/*
3590			 * val cannot be the same as insert_hash, and
3591			 * also must be larger than leaf_cpos. Also,
3592			 * we know that there can't be a leaf between
3593			 * cpos and val, otherwise the entries with
3594			 * hash 'val' would be there.
3595			 */
3596			*split_hash = val;
3597			return 0;
3598		}
3599
3600		*split_hash = insert_hash;
3601		return 0;
3602	}
3603
3604	/*
3605	 * Since the records are sorted and the checks above
3606	 * guaranteed that not all records in this block are the same,
3607	 * we simply travel forward, from the median, and pick the 1st
3608	 * record whose value is larger than leaf_cpos.
3609	 */
3610	for (i = (num_used / 2); i < num_used; i++)
3611		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3612		    leaf_cpos)
3613			break;
3614
3615	BUG_ON(i == num_used); /* Should be impossible */
3616	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3617	return 0;
3618}
3619
3620/*
3621 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3622 * larger than split_hash into new_dx_leaves. We use a temporary
3623 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3624 *
3625 * Since the block offset inside a leaf (cluster) is a constant mask
3626 * of minor_hash, we can optimize - an item at block offset X within
3627 * the original cluster will be at offset X within the new cluster.
3628 */
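/*
 * Continuing the example above (split_hash == 9): within each block of
 * the cluster, entries hashed 9 and 12 are appended to the matching
 * block of the new cluster, while 5, 5 and 7 are rebuilt in the scratch
 * buffer and copied back over the original, so no in-place memmove() is
 * ever needed.
 */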
3629static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3630				       handle_t *handle,
3631				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3632				       struct buffer_head **orig_dx_leaves,
3633				       struct buffer_head **new_dx_leaves,
3634				       int num_dx_leaves)
3635{
3636	int i, j, num_used;
3637	u32 major_hash;
3638	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3639	struct ocfs2_dx_entry_list *orig_list, *tmp_list;
3640	struct ocfs2_dx_entry *dx_entry;
3641
3642	tmp_list = &tmp_dx_leaf->dl_list;
3643
3644	for (i = 0; i < num_dx_leaves; i++) {
3645		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3646		orig_list = &orig_dx_leaf->dl_list;
3647		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
3648
3649		num_used = le16_to_cpu(orig_list->de_num_used);
3650
3651		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3652		tmp_list->de_num_used = cpu_to_le16(0);
3653		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3654
3655		for (j = 0; j < num_used; j++) {
3656			dx_entry = &orig_list->de_entries[j];
3657			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3658			if (major_hash >= split_hash)
3659				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3660							      dx_entry);
3661			else
3662				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3663							      dx_entry);
3664		}
3665		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3666
3667		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3668		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3669	}
3670}
3671
3672static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3673					  struct ocfs2_dx_root_block *dx_root)
3674{
3675	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3676
3677	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3678	credits += ocfs2_quota_trans_credits(osb->sb);
3679	return credits;
3680}
3681
3682/*
3683 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3684 * half our entries into.
3685 */
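/*
 * In outline: reserve allocators and a transaction, sort the full leaf
 * in place, pick split_hash, grow the index by one cluster worth of
 * leaves (ocfs2_dx_dir_new_cluster), take journal access on both the
 * old and the new leaf blocks, and finally move the upper-half entries
 * across with ocfs2_dx_dir_transfer_leaf().
 */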
3686static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3687				  struct buffer_head *dx_root_bh,
3688				  struct buffer_head *dx_leaf_bh,
3689				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3690				  u64 leaf_blkno)
3691{
3692	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3693	int credits, ret, i, num_used, did_quota = 0;
3694	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3695	u64 orig_leaves_start;
3696	int num_dx_leaves;
3697	struct buffer_head **orig_dx_leaves = NULL;
3698	struct buffer_head **new_dx_leaves = NULL;
3699	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3700	struct ocfs2_extent_tree et;
3701	handle_t *handle = NULL;
3702	struct ocfs2_dx_root_block *dx_root;
3703	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3704
3705	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3706				     (unsigned long long)leaf_blkno,
3707				     insert_hash);
3708
3709	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3710
3711	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3712	/*
3713	 * XXX: This is a rather large limit. We should use a more
3714	 * realistic value.
3715	 */
3716	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3717		return -ENOSPC;
3718
3719	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3720	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3721		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance non-full leaf: "
3722		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3723		     (unsigned long long)leaf_blkno, num_used);
3724		ret = -EIO;
3725		goto out;
3726	}
3727
3728	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3729	if (!orig_dx_leaves) {
3730		ret = -ENOMEM;
3731		mlog_errno(ret);
3732		goto out;
3733	}
3734
3735	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3736	if (!new_dx_leaves) {
3737		ret = -ENOMEM;
3738		mlog_errno(ret);
3739		goto out;
3740	}
3741
3742	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3743	if (ret) {
3744		if (ret != -ENOSPC)
3745			mlog_errno(ret);
3746		goto out;
3747	}
3748
3749	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3750	handle = ocfs2_start_trans(osb, credits);
3751	if (IS_ERR(handle)) {
3752		ret = PTR_ERR(handle);
3753		handle = NULL;
3754		mlog_errno(ret);
3755		goto out;
3756	}
3757
3758	ret = dquot_alloc_space_nodirty(dir,
3759				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3760	if (ret)
3761		goto out_commit;
3762	did_quota = 1;
3763
3764	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3765				      OCFS2_JOURNAL_ACCESS_WRITE);
3766	if (ret) {
3767		mlog_errno(ret);
3768		goto out_commit;
3769	}
3770
3771	/*
3772	 * This block is changing anyway, so we can sort it in place.
3773	 */
3774	sort(dx_leaf->dl_list.de_entries, num_used,
3775	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3776	     dx_leaf_sort_swap);
3777
3778	ocfs2_journal_dirty(handle, dx_leaf_bh);
3779
3780	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3781					   &split_hash);
3782	if (ret) {
3783		mlog_errno(ret);
3784		goto  out_commit;
3785	}
3786
3787	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3788
3789	/*
3790	 * We have to carefully order operations here. There are items
3791	 * which want to be in the new cluster before insert, but in
3792	 * order to put those items in the new cluster, we alter the
3793	 * old cluster. A failure to insert gets nasty.
3794	 *
3795	 * So, start by reserving writes to the old
3796	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3797	 * the new cluster for us, before inserting it. The insert
3798	 * won't happen if there's an error before that. Once the
3799	 * insert is done, we can then transfer from one leaf into the
3800	 * other without fear of hitting any error.
3801	 */
3802
3803	/*
3804	 * The leaf transfer wants some scratch space so that we don't
3805	 * wind up doing a bunch of expensive memmove().
3806	 */
3807	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3808	if (!tmp_dx_leaf) {
3809		ret = -ENOMEM;
3810		mlog_errno(ret);
3811		goto out_commit;
3812	}
3813
3814	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3815	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3816				   orig_dx_leaves);
3817	if (ret) {
3818		mlog_errno(ret);
3819		goto out_commit;
3820	}
3821
3822	cpos = split_hash;
3823	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3824				       data_ac, meta_ac, new_dx_leaves,
3825				       num_dx_leaves);
3826	if (ret) {
3827		mlog_errno(ret);
3828		goto out_commit;
3829	}
3830
3831	for (i = 0; i < num_dx_leaves; i++) {
3832		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3833					      orig_dx_leaves[i],
3834					      OCFS2_JOURNAL_ACCESS_WRITE);
3835		if (ret) {
3836			mlog_errno(ret);
3837			goto out_commit;
3838		}
3839
3840		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3841					      new_dx_leaves[i],
3842					      OCFS2_JOURNAL_ACCESS_WRITE);
3843		if (ret) {
3844			mlog_errno(ret);
3845			goto out_commit;
3846		}
3847	}
3848
3849	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3850				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3851
3852out_commit:
3853	if (ret < 0 && did_quota)
3854		dquot_free_space_nodirty(dir,
3855				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3856
3857	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3858	ocfs2_commit_trans(osb, handle);
3859
3860out:
3861	if (orig_dx_leaves || new_dx_leaves) {
3862		for (i = 0; i < num_dx_leaves; i++) {
3863			if (orig_dx_leaves)
3864				brelse(orig_dx_leaves[i]);
3865			if (new_dx_leaves)
3866				brelse(new_dx_leaves[i]);
3867		}
3868		kfree(orig_dx_leaves);
3869		kfree(new_dx_leaves);
3870	}
3871
3872	if (meta_ac)
3873		ocfs2_free_alloc_context(meta_ac);
3874	if (data_ac)
3875		ocfs2_free_alloc_context(data_ac);
3876
3877	kfree(tmp_dx_leaf);
3878	return ret;
3879}
3880
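/*
 * Reserve a slot in the index for the new name: map its major hash to a
 * dx leaf and, if that leaf is full, rebalance once and retry the
 * lookup. On success the chosen leaf bh is handed back in
 * lookup->dl_dx_leaf_bh for the eventual insert.
 */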
3881static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3882				   struct buffer_head *di_bh,
3883				   struct buffer_head *dx_root_bh,
3884				   const char *name, int namelen,
3885				   struct ocfs2_dir_lookup_result *lookup)
3886{
3887	int ret, rebalanced = 0;
3888	struct ocfs2_dx_root_block *dx_root;
3889	struct buffer_head *dx_leaf_bh = NULL;
3890	struct ocfs2_dx_leaf *dx_leaf;
3891	u64 blkno;
3892	u32 leaf_cpos;
3893
3894	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3895
3896restart_search:
3897	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3898				  &leaf_cpos, &blkno);
3899	if (ret) {
3900		mlog_errno(ret);
3901		goto out;
3902	}
3903
3904	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3905	if (ret) {
3906		mlog_errno(ret);
3907		goto out;
3908	}
3909
3910	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3911
3912	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3913	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3914		if (rebalanced) {
3915			/*
3916			 * Rebalancing should have provided us with
3917			 * space in an appropriate leaf.
3918			 *
3919			 * XXX: Is this an abnormal condition then?
3920			 * Should we print a message here?
3921			 */
3922			ret = -ENOSPC;
3923			goto out;
3924		}
3925
3926		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3927					     &lookup->dl_hinfo, leaf_cpos,
3928					     blkno);
3929		if (ret) {
3930			if (ret != -ENOSPC)
3931				mlog_errno(ret);
3932			goto out;
3933		}
3934
3935		/*
3936		 * Restart the lookup. The rebalance might have
3937		 * changed which block our item fits into. Mark our
3938		 * progress, so we only execute this once.
3939		 */
3940		brelse(dx_leaf_bh);
3941		dx_leaf_bh = NULL;
3942		rebalanced = 1;
3943		goto restart_search;
3944	}
3945
3946	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3947	dx_leaf_bh = NULL;
3948
3949out:
3950	brelse(dx_leaf_bh);
3951	return ret;
3952}
3953
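/*
 * Walk the list of unindexed blocks with free space, rooted at
 * dr_free_blk and threaded through db_free_next in each block trailer,
 * until one advertises at least OCFS2_DIR_REC_LEN(namelen) free bytes.
 * The previous block in the list is returned as well, so that the
 * insert path can unlink the chosen block if it ends up full.
 */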
3954static int ocfs2_search_dx_free_list(struct inode *dir,
3955				     struct buffer_head *dx_root_bh,
3956				     int namelen,
3957				     struct ocfs2_dir_lookup_result *lookup)
3958{
3959	int ret = -ENOSPC;
3960	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3961	struct ocfs2_dir_block_trailer *db;
3962	u64 next_block;
3963	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3964	struct ocfs2_dx_root_block *dx_root;
3965
3966	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3967	next_block = le64_to_cpu(dx_root->dr_free_blk);
3968
3969	while (next_block) {
3970		brelse(prev_leaf_bh);
3971		prev_leaf_bh = leaf_bh;
3972		leaf_bh = NULL;
3973
3974		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
3975		if (ret) {
3976			mlog_errno(ret);
3977			goto out;
3978		}
3979
3980		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
3981		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
3982			lookup->dl_leaf_bh = leaf_bh;
3983			lookup->dl_prev_leaf_bh = prev_leaf_bh;
3984			leaf_bh = NULL;
3985			prev_leaf_bh = NULL;
3986			break;
3987		}
3988
3989		next_block = le64_to_cpu(db->db_free_next);
3990	}
3991
3992	if (!next_block)
3993		ret = -ENOSPC;
3994
3995out:
3996
3997	brelse(leaf_bh);
3998	brelse(prev_leaf_bh);
3999	return ret;
4000}
4001
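/*
 * The index root keeps its dx entries inline until the dr_entries array
 * fills up. This routine allocates one cluster worth of dx leaf blocks,
 * redistributes every inline entry into the leaf selected by its minor
 * hash, clears OCFS2_DX_FLAG_INLINE and turns dr_list into a (single
 * extent, for now) extent list.
 */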
4002static int ocfs2_expand_inline_dx_root(struct inode *dir,
4003				       struct buffer_head *dx_root_bh)
4004{
4005	int ret, num_dx_leaves, i, j, did_quota = 0;
4006	struct buffer_head **dx_leaves = NULL;
4007	struct ocfs2_extent_tree et;
4008	u64 insert_blkno;
4009	struct ocfs2_alloc_context *data_ac = NULL;
4010	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4011	handle_t *handle = NULL;
4012	struct ocfs2_dx_root_block *dx_root;
4013	struct ocfs2_dx_entry_list *entry_list;
4014	struct ocfs2_dx_entry *dx_entry;
4015	struct ocfs2_dx_leaf *target_leaf;
4016
4017	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4018	if (ret) {
4019		mlog_errno(ret);
4020		goto out;
4021	}
4022
4023	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4024	if (!dx_leaves) {
4025		ret = -ENOMEM;
4026		mlog_errno(ret);
4027		goto out;
4028	}
4029
4030	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4031	if (IS_ERR(handle)) {
4032		ret = PTR_ERR(handle);
4033		mlog_errno(ret);
4034		goto out;
4035	}
4036
4037	ret = dquot_alloc_space_nodirty(dir,
4038				       ocfs2_clusters_to_bytes(osb->sb, 1));
4039	if (ret)
4040		goto out_commit;
4041	did_quota = 1;
4042
4043	/*
4044	 * We do this up front, before the allocation, so that a
4045	 * failure to add the dx_root_bh to the journal won't result
4046	 * in us losing clusters.
4047	 */
4048	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4049				      OCFS2_JOURNAL_ACCESS_WRITE);
4050	if (ret) {
4051		mlog_errno(ret);
4052		goto out_commit;
4053	}
4054
4055	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4056					 num_dx_leaves, &insert_blkno);
4057	if (ret) {
4058		mlog_errno(ret);
4059		goto out_commit;
4060	}
4061
4062	/*
4063	 * Transfer the entries from our dx_root into the appropriate
4064	 * block
4065	 */
4066	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4067	entry_list = &dx_root->dr_entries;
4068
4069	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4070		dx_entry = &entry_list->de_entries[i];
4071
4072		j = __ocfs2_dx_dir_hash_idx(osb,
4073					    le32_to_cpu(dx_entry->dx_minor_hash));
4074		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4075
4076		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4077
4078		/* Each leaf has been passed to the journal already
4079		 * via __ocfs2_dx_dir_new_cluster() */
4080	}
4081
4082	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4083	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4084	       offsetof(struct ocfs2_dx_root_block, dr_list));
4085	dx_root->dr_list.l_count =
4086		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4087
4088	/* This should never fail considering we start with an empty
4089	 * dx_root. */
4090	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4091	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4092	if (ret)
4093		mlog_errno(ret);
4094	did_quota = 0;
4095
4096	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4097	ocfs2_journal_dirty(handle, dx_root_bh);
4098
4099out_commit:
4100	if (ret < 0 && did_quota)
4101		dquot_free_space_nodirty(dir,
4102					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4103
4104	ocfs2_commit_trans(osb, handle);
4105
4106out:
4107	if (data_ac)
4108		ocfs2_free_alloc_context(data_ac);
4109
4110	if (dx_leaves) {
4111		for (i = 0; i < num_dx_leaves; i++)
4112			brelse(dx_leaves[i]);
4113		kfree(dx_leaves);
4114	}
4115	return ret;
4116}
4117
4118static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4119{
4120	struct ocfs2_dx_root_block *dx_root;
4121	struct ocfs2_dx_entry_list *entry_list;
4122
4123	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4124	entry_list = &dx_root->dr_entries;
4125
4126	if (le16_to_cpu(entry_list->de_num_used) >=
4127	    le16_to_cpu(entry_list->de_count))
4128		return -ENOSPC;
4129
4130	return 0;
4131}
4132
4133static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4134					   struct buffer_head *di_bh,
4135					   const char *name,
4136					   int namelen,
4137					   struct ocfs2_dir_lookup_result *lookup)
4138{
4139	int ret, free_dx_root = 1;
4140	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4141	struct buffer_head *dx_root_bh = NULL;
4142	struct buffer_head *leaf_bh = NULL;
4143	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4144	struct ocfs2_dx_root_block *dx_root;
4145
4146	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4147	if (ret) {
4148		mlog_errno(ret);
4149		goto out;
4150	}
4151
4152	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4153	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4154		ret = -ENOSPC;
4155		mlog_errno(ret);
4156		goto out;
4157	}
4158
4159	if (ocfs2_dx_root_inline(dx_root)) {
4160		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4161
4162		if (ret == 0)
4163			goto search_el;
4164
4165		/*
4166		 * We ran out of room in the root block. Expand it to
4167		 * an extent, then allow ocfs2_find_dir_space_dx to do
4168		 * the rest.
4169		 */
4170		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4171		if (ret) {
4172			mlog_errno(ret);
4173			goto out;
4174		}
4175	}
4176
4177	/*
4178	 * Insert preparation for an indexed directory is split into two
4179	 * steps. The call to find_dir_space_dx reserves room in the index for
4180	 * an additional item. If we run out of space there, it's a real error
4181	 * and we can't continue.
4182	 */
4183	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4184				      namelen, lookup);
4185	if (ret) {
4186		mlog_errno(ret);
4187		goto out;
4188	}
4189
4190search_el:
4191	/*
4192	 * Next, we need to find space in the unindexed tree. This call
4193	 * searches using the free space linked list. If the unindexed tree
4194	 * lacks sufficient space, we'll expand it below. The expansion code
4195	 * is smart enough to add any new blocks to the free space list.
4196	 */
4197	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4198	if (ret && ret != -ENOSPC) {
4199		mlog_errno(ret);
4200		goto out;
4201	}
4202
4203	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4204	lookup->dl_dx_root_bh = dx_root_bh;
4205	free_dx_root = 0;
4206
4207	if (ret == -ENOSPC) {
4208		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4209
4210		if (ret) {
4211			mlog_errno(ret);
4212			goto out;
4213		}
4214
4215		/*
4216		 * We make the assumption here that new leaf blocks are added
4217		 * to the front of our free list.
4218		 */
4219		lookup->dl_prev_leaf_bh = NULL;
4220		lookup->dl_leaf_bh = leaf_bh;
4221	}
4222
4223out:
4224	if (free_dx_root)
4225		brelse(dx_root_bh);
4226	return ret;
4227}
4228
4229/*
4230 * Get a directory ready for insert. Any directory allocation required
4231 * happens here. Success returns zero, and enough context in the dir
4232 * lookup result that ocfs2_add_entry() will be able to complete the task
4233 * with minimal performance impact.
4234 */
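/*
 * A rough sketch of the expected calling pattern (argument names are
 * placeholders; cluster locking, transaction setup and error handling
 * are omitted - see e.g. ocfs2_mknod() in namei.c for the real thing):
 *
 *	status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
 *					      name, namelen, &lookup);
 *	...
 *	status = ocfs2_add_entry(handle, dentry, inode, blkno,
 *				 parent_fe_bh, &lookup);
 */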
4235int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4236				 struct inode *dir,
4237				 struct buffer_head *parent_fe_bh,
4238				 const char *name,
4239				 int namelen,
4240				 struct ocfs2_dir_lookup_result *lookup)
4241{
4242	int ret;
4243	unsigned int blocks_wanted = 1;
4244	struct buffer_head *bh = NULL;
4245
4246	trace_ocfs2_prepare_dir_for_insert(
4247		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4248
4249	if (!namelen) {
4250		ret = -EINVAL;
4251		mlog_errno(ret);
4252		goto out;
4253	}
4254
4255	/*
4256	 * Do this up front to reduce confusion.
4257	 *
4258	 * The directory might start inline, then be turned into an
4259	 * indexed one, in which case we'd need to hash deep inside
4260	 * ocfs2_find_dir_space_id(). Since
4261	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4262	 * done, there seems no point in spreading out the calls. We
4263	 * can optimize away the case where the file system doesn't
4264	 * support indexing.
4265	 */
4266	if (ocfs2_supports_indexed_dirs(osb))
4267		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4268
4269	if (ocfs2_dir_indexed(dir)) {
4270		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4271						      name, namelen, lookup);
4272		if (ret)
4273			mlog_errno(ret);
4274		goto out;
4275	}
4276
4277	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4278		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4279					      namelen, &bh, &blocks_wanted);
4280	} else
4281		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4282
4283	if (ret && ret != -ENOSPC) {
4284		mlog_errno(ret);
4285		goto out;
4286	}
4287
4288	if (ret == -ENOSPC) {
4289		/*
4290		 * We have to expand the directory to add this name.
4291		 */
4292		BUG_ON(bh);
4293
4294		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4295				       lookup, &bh);
4296		if (ret) {
4297			if (ret != -ENOSPC)
4298				mlog_errno(ret);
4299			goto out;
4300		}
4301
4302		BUG_ON(!bh);
4303	}
4304
4305	lookup->dl_leaf_bh = bh;
4306	bh = NULL;
4307out:
4308	brelse(bh);
4309	return ret;
4310}
4311
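/*
 * Release the index root block itself: clear OCFS2_INDEXED_DIR_FL and
 * di->i_dx_root on the directory inode, then give the root block back
 * to the extent allocator slot it was suballocated from.
 */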
4312static int ocfs2_dx_dir_remove_index(struct inode *dir,
4313				     struct buffer_head *di_bh,
4314				     struct buffer_head *dx_root_bh)
4315{
4316	int ret;
4317	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4318	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4319	struct ocfs2_dx_root_block *dx_root;
4320	struct inode *dx_alloc_inode = NULL;
4321	struct buffer_head *dx_alloc_bh = NULL;
4322	handle_t *handle;
4323	u64 blk;
4324	u16 bit;
4325	u64 bg_blkno;
4326
4327	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4328
4329	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4330					EXTENT_ALLOC_SYSTEM_INODE,
4331					le16_to_cpu(dx_root->dr_suballoc_slot));
4332	if (!dx_alloc_inode) {
4333		ret = -ENOMEM;
4334		mlog_errno(ret);
4335		goto out;
4336	}
4337	inode_lock(dx_alloc_inode);
4338
4339	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4340	if (ret) {
4341		mlog_errno(ret);
4342		goto out_mutex;
4343	}
4344
4345	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4346	if (IS_ERR(handle)) {
4347		ret = PTR_ERR(handle);
4348		mlog_errno(ret);
4349		goto out_unlock;
4350	}
4351
4352	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4353				      OCFS2_JOURNAL_ACCESS_WRITE);
4354	if (ret) {
4355		mlog_errno(ret);
4356		goto out_commit;
4357	}
4358
4359	spin_lock(&OCFS2_I(dir)->ip_lock);
4360	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4361	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4362	spin_unlock(&OCFS2_I(dir)->ip_lock);
4363	di->i_dx_root = cpu_to_le64(0ULL);
4364	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4365
4366	ocfs2_journal_dirty(handle, di_bh);
4367
4368	blk = le64_to_cpu(dx_root->dr_blkno);
4369	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4370	if (dx_root->dr_suballoc_loc)
4371		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4372	else
4373		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4374	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4375				       bit, bg_blkno, 1);
4376	if (ret)
4377		mlog_errno(ret);
4378
4379out_commit:
4380	ocfs2_commit_trans(osb, handle);
4381
4382out_unlock:
4383	ocfs2_inode_unlock(dx_alloc_inode, 1);
4384
4385out_mutex:
4386	inode_unlock(dx_alloc_inode);
4387	brelse(dx_alloc_bh);
4388out:
4389	iput(dx_alloc_inode);
4390	return ret;
4391}
4392
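/*
 * Tear the whole index down, e.g. when the directory is being deleted:
 * walk dr_list from the highest-hashed cluster downward, punching out
 * each leaf cluster, then drop the root block via
 * ocfs2_dx_dir_remove_index(). Inline roots have no leaf clusters and
 * go straight to the removal step.
 */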
4393int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4394{
4395	int ret;
4396	unsigned int uninitialized_var(clen);
4397	u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos);
4398	u64 uninitialized_var(blkno);
4399	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4400	struct buffer_head *dx_root_bh = NULL;
4401	struct ocfs2_dx_root_block *dx_root;
4402	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4403	struct ocfs2_cached_dealloc_ctxt dealloc;
4404	struct ocfs2_extent_tree et;
4405
4406	ocfs2_init_dealloc_ctxt(&dealloc);
4407
4408	if (!ocfs2_dir_indexed(dir))
4409		return 0;
4410
4411	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4412	if (ret) {
4413		mlog_errno(ret);
4414		goto out;
4415	}
4416	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4417
4418	if (ocfs2_dx_root_inline(dx_root))
4419		goto remove_index;
4420
4421	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4422
4423	/* XXX: What if dr_clusters is too large? */
4424	while (le32_to_cpu(dx_root->dr_clusters)) {
4425		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4426					      major_hash, &cpos, &blkno, &clen);
4427		if (ret) {
4428			mlog_errno(ret);
4429			goto out;
4430		}
4431
4432		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4433
4434		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4435					       &dealloc, 0, false);
4436		if (ret) {
4437			mlog_errno(ret);
4438			goto out;
4439		}
4440
4441		if (cpos == 0)
4442			break;
4443
4444		major_hash = cpos - 1;
4445	}
4446
4447remove_index:
4448	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4449	if (ret) {
4450		mlog_errno(ret);
4451		goto out;
4452	}
4453
4454	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4455out:
4456	ocfs2_schedule_truncate_log_flush(osb, 1);
4457	ocfs2_run_deallocs(osb, &dealloc);
4458
4459	brelse(dx_root_bh);
4460	return ret;
4461}