   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * dir.c
   5 *
   6 * Creates, reads, walks and deletes directory-nodes
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 *  Portions of this code from linux/fs/ext3/dir.c
  11 *
  12 *  Copyright (C) 1992, 1993, 1994, 1995
  13 *  Remy Card (card@masi.ibp.fr)
   14 *  Laboratoire MASI - Institut Blaise Pascal
  15 *  Universite Pierre et Marie Curie (Paris VI)
  16 *
  17 *   from
  18 *
  19 *   linux/fs/minix/dir.c
  20 *
  21 *   Copyright (C) 1991, 1992 Linus Torvalds
  22 *
  23 * This program is free software; you can redistribute it and/or
  24 * modify it under the terms of the GNU General Public
  25 * License as published by the Free Software Foundation; either
  26 * version 2 of the License, or (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  31 * General Public License for more details.
  32 *
  33 * You should have received a copy of the GNU General Public
  34 * License along with this program; if not, write to the
  35 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   36 * Boston, MA 02111-1307, USA.
  37 */
  38
  39#include <linux/fs.h>
  40#include <linux/types.h>
  41#include <linux/slab.h>
  42#include <linux/highmem.h>
  43#include <linux/quotaops.h>
  44#include <linux/sort.h>
  45
  46#include <cluster/masklog.h>
  47
  48#include "ocfs2.h"
  49
  50#include "alloc.h"
  51#include "blockcheck.h"
  52#include "dir.h"
  53#include "dlmglue.h"
  54#include "extent_map.h"
  55#include "file.h"
  56#include "inode.h"
  57#include "journal.h"
  58#include "namei.h"
  59#include "suballoc.h"
  60#include "super.h"
  61#include "sysfile.h"
  62#include "uptodate.h"
  63#include "ocfs2_trace.h"
  64
  65#include "buffer_head_io.h"
  66
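/*
 * ocfs2_find_entry_el() reads directory blocks ahead in batches of up
 * to NAMEI_RA_SIZE buffers while scanning for a name.
 */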
  67#define NAMEI_RA_CHUNKS  2
  68#define NAMEI_RA_BLOCKS  4
  69#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  70
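/*
 * Maps the on-disk OCFS2_FT_* file type codes to the DT_* values that
 * readdir reports (see the dir_emit() callers below).
 */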
  71static unsigned char ocfs2_filetype_table[] = {
  72	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
  73};
  74
  75static int ocfs2_do_extend_dir(struct super_block *sb,
  76			       handle_t *handle,
  77			       struct inode *dir,
  78			       struct buffer_head *parent_fe_bh,
  79			       struct ocfs2_alloc_context *data_ac,
  80			       struct ocfs2_alloc_context *meta_ac,
  81			       struct buffer_head **new_bh);
  82static int ocfs2_dir_indexed(struct inode *inode);
  83
  84/*
  85 * These are distinct checks because future versions of the file system will
  86 * want to have a trailing dirent structure independent of indexing.
  87 */
  88static int ocfs2_supports_dir_trailer(struct inode *dir)
  89{
  90	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  91
  92	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  93		return 0;
  94
  95	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  96}
  97
  98/*
  99 * "new' here refers to the point at which we're creating a new
 100 * directory via "mkdir()", but also when we're expanding an inline
 101 * directory. In either case, we don't yet have the indexing bit set
 102 * on the directory, so the standard checks will fail in when metaecc
 103 * is turned off. Only directory-initialization type functions should
 104 * use this then. Everything else wants ocfs2_supports_dir_trailer()
 105 */
 106static int ocfs2_new_dir_wants_trailer(struct inode *dir)
 107{
 108	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 109
 110	return ocfs2_meta_ecc(osb) ||
 111		ocfs2_supports_indexed_dirs(osb);
 112}
 113
 114static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
 115{
 116	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
 117}
 118
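/*
 * The trailer occupies the last sizeof(struct ocfs2_dir_block_trailer)
 * bytes of a directory block; this macro returns a pointer to it
 * within a buffer_head.
 */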
 119#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 120
 121/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 122 * them more consistent? */
 123struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 124							    void *data)
 125{
 126	char *p = data;
 127
 128	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 129	return (struct ocfs2_dir_block_trailer *)p;
 130}
 131
 132/*
 133 * XXX: This is executed once on every dirent. We should consider optimizing
 134 * it.
 135 */
 136static int ocfs2_skip_dir_trailer(struct inode *dir,
 137				  struct ocfs2_dir_entry *de,
 138				  unsigned long offset,
 139				  unsigned long blklen)
 140{
 141	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 142
 143	if (!ocfs2_supports_dir_trailer(dir))
 144		return 0;
 145
 146	if (offset != toff)
 147		return 0;
 148
 149	return 1;
 150}
 151
 152static void ocfs2_init_dir_trailer(struct inode *inode,
 153				   struct buffer_head *bh, u16 rec_len)
 154{
 155	struct ocfs2_dir_block_trailer *trailer;
 156
 157	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 158	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 159	trailer->db_compat_rec_len =
 160			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 161	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 162	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 163	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 164}
 165/*
 166 * Link an unindexed block with a dir trailer structure into the index free
 167 * list. This function will modify dirdata_bh, but assumes you've already
 168 * passed it to the journal.
 169 */
 170static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 171				     struct buffer_head *dx_root_bh,
 172				     struct buffer_head *dirdata_bh)
 173{
 174	int ret;
 175	struct ocfs2_dx_root_block *dx_root;
 176	struct ocfs2_dir_block_trailer *trailer;
 177
 178	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 179				      OCFS2_JOURNAL_ACCESS_WRITE);
 180	if (ret) {
 181		mlog_errno(ret);
 182		goto out;
 183	}
 184	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 185	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 186
 187	trailer->db_free_next = dx_root->dr_free_blk;
 188	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 189
 190	ocfs2_journal_dirty(handle, dx_root_bh);
 191
 192out:
 193	return ret;
 194}
 195
 196static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 197{
 198	return res->dl_prev_leaf_bh == NULL;
 199}
 200
 201void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 202{
 203	brelse(res->dl_dx_root_bh);
 204	brelse(res->dl_leaf_bh);
 205	brelse(res->dl_dx_leaf_bh);
 206	brelse(res->dl_prev_leaf_bh);
 207}
 208
 209static int ocfs2_dir_indexed(struct inode *inode)
 210{
 211	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 212		return 1;
 213	return 0;
 214}
 215
 216static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 217{
 218	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 219}
 220
 221/*
 222 * Hashing code adapted from ext3
 223 */
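/*
 * DELTA is the standard TEA key-schedule constant (2^32 divided by the
 * golden ratio). TEA_transform() below runs sixteen rounds of the TEA
 * mixing function, used here purely as a hash.
 */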
 224#define DELTA 0x9E3779B9
 225
 226static void TEA_transform(__u32 buf[4], __u32 const in[])
 227{
 228	__u32	sum = 0;
 229	__u32	b0 = buf[0], b1 = buf[1];
 230	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 231	int	n = 16;
 232
 233	do {
 234		sum += DELTA;
 235		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 236		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 237	} while (--n);
 238
 239	buf[0] += b0;
 240	buf[1] += b1;
 241}
 242
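/*
 * Pack up to num * 4 bytes of the name into __u32 words for
 * TEA_transform(), padding any remainder with a byte pattern derived
 * from the name length.
 */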
 243static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 244{
 245	__u32	pad, val;
 246	int	i;
 247
 248	pad = (__u32)len | ((__u32)len << 8);
 249	pad |= pad << 16;
 250
 251	val = pad;
 252	if (len > num*4)
 253		len = num * 4;
 254	for (i = 0; i < len; i++) {
 255		if ((i % 4) == 0)
 256			val = pad;
 257		val = msg[i] + (val << 8);
 258		if ((i % 4) == 3) {
 259			*buf++ = val;
 260			val = pad;
 261			num--;
 262		}
 263	}
 264	if (--num >= 0)
 265		*buf++ = val;
 266	while (--num >= 0)
 267		*buf++ = pad;
 268}
 269
 270static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 271				   struct ocfs2_dx_hinfo *hinfo)
 272{
 273	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 274	const char	*p;
 275	__u32		in[8], buf[4];
 276
 277	/*
 278	 * XXX: Is this really necessary, if the index is never looked
 279	 * at by readdir? Is a hash value of '0' a bad idea?
 280	 */
 281	if ((len == 1 && !strncmp(".", name, 1)) ||
 282	    (len == 2 && !strncmp("..", name, 2))) {
 283		buf[0] = buf[1] = 0;
 284		goto out;
 285	}
 286
 287#ifdef OCFS2_DEBUG_DX_DIRS
 288	/*
 289	 * This makes it very easy to debug indexing problems. We
 290	 * should never allow this to be selected without hand editing
 291	 * this file though.
 292	 */
 293	buf[0] = buf[1] = len;
 294	goto out;
 295#endif
 296
 297	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 298
 299	p = name;
 300	while (len > 0) {
 301		str2hashbuf(p, len, in, 4);
 302		TEA_transform(buf, in);
 303		len -= 16;
 304		p += 16;
 305	}
 306
 307out:
 308	hinfo->major_hash = buf[0];
 309	hinfo->minor_hash = buf[1];
 310}
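/*
 * The major hash computed above selects a leaf cluster through the dx
 * tree's extent list; the minor hash selects the block within that
 * cluster (see ocfs2_dx_dir_lookup() and __ocfs2_dx_dir_hash_idx()).
 */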
 311
 312/*
 313 * bh passed here can be an inode block or a dir data block, depending
 314 * on the inode inline data flag.
 315 */
 316static int ocfs2_check_dir_entry(struct inode * dir,
 317				 struct ocfs2_dir_entry * de,
 318				 struct buffer_head * bh,
 319				 unsigned long offset)
 320{
 321	const char *error_msg = NULL;
 322	const int rlen = le16_to_cpu(de->rec_len);
 323
 324	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 325		error_msg = "rec_len is smaller than minimal";
 326	else if (unlikely(rlen % 4 != 0))
 327		error_msg = "rec_len % 4 != 0";
 328	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 329		error_msg = "rec_len is too small for name_len";
 330	else if (unlikely(
 331		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
 332		error_msg = "directory entry across blocks";
 333
 334	if (unlikely(error_msg != NULL))
 335		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 336		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 337		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 338		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 339		     de->name_len);
 340
 341	return error_msg == NULL ? 1 : 0;
 342}
 343
 344static inline int ocfs2_match(int len,
 345			      const char * const name,
 346			      struct ocfs2_dir_entry *de)
 347{
 348	if (len != de->name_len)
 349		return 0;
 350	if (!de->inode)
 351		return 0;
 352	return !memcmp(name, de->name, len);
 353}
 354
 355/*
 356 * Returns 0 if not found, -1 on failure, and 1 on success
 357 */
 358static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 359					struct inode *dir,
 360					const char *name, int namelen,
 361					unsigned long offset,
 362					char *first_de,
 363					unsigned int bytes,
 364					struct ocfs2_dir_entry **res_dir)
 365{
 366	struct ocfs2_dir_entry *de;
 367	char *dlimit, *de_buf;
 368	int de_len;
 369	int ret = 0;
 370
 371	de_buf = first_de;
 372	dlimit = de_buf + bytes;
 373
 374	while (de_buf < dlimit) {
 375		/* this code is executed quadratically often */
 376		/* do minimal checking `by hand' */
 377
 378		de = (struct ocfs2_dir_entry *) de_buf;
 379
 380		if (de_buf + namelen <= dlimit &&
 381		    ocfs2_match(namelen, name, de)) {
 382			/* found a match - just to be sure, do a full check */
 383			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 384				ret = -1;
 385				goto bail;
 386			}
 387			*res_dir = de;
 388			ret = 1;
 389			goto bail;
 390		}
 391
 392		/* prevent looping on a bad block */
 393		de_len = le16_to_cpu(de->rec_len);
 394		if (de_len <= 0) {
 395			ret = -1;
 396			goto bail;
 397		}
 398
 399		de_buf += de_len;
 400		offset += de_len;
 401	}
 402
 403bail:
 404	trace_ocfs2_search_dirblock(ret);
 405	return ret;
 406}
 407
 408static struct buffer_head *ocfs2_find_entry_id(const char *name,
 409					       int namelen,
 410					       struct inode *dir,
 411					       struct ocfs2_dir_entry **res_dir)
 412{
 413	int ret, found;
 414	struct buffer_head *di_bh = NULL;
 415	struct ocfs2_dinode *di;
 416	struct ocfs2_inline_data *data;
 417
 418	ret = ocfs2_read_inode_block(dir, &di_bh);
 419	if (ret) {
 420		mlog_errno(ret);
 421		goto out;
 422	}
 423
 424	di = (struct ocfs2_dinode *)di_bh->b_data;
 425	data = &di->id2.i_data;
 426
 427	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 428				      data->id_data, i_size_read(dir), res_dir);
 429	if (found == 1)
 430		return di_bh;
 431
 432	brelse(di_bh);
 433out:
 434	return NULL;
 435}
 436
 437static int ocfs2_validate_dir_block(struct super_block *sb,
 438				    struct buffer_head *bh)
 439{
 440	int rc;
 441	struct ocfs2_dir_block_trailer *trailer =
 442		ocfs2_trailer_from_bh(bh, sb);
 443
 444
 445	/*
 446	 * We don't validate dirents here, that's handled
 447	 * in-place when the code walks them.
 448	 */
 449	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 450
 451	BUG_ON(!buffer_uptodate(bh));
 452
 453	/*
 454	 * If the ecc fails, we return the error but otherwise
 455	 * leave the filesystem running.  We know any error is
 456	 * local to this block.
 457	 *
 458	 * Note that we are safe to call this even if the directory
 459	 * doesn't have a trailer.  Filesystems without metaecc will do
  460	 * nothing, and filesystems with metaecc will have a trailer.
 461	 */
 462	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 463	if (rc)
  464		mlog(ML_ERROR, "Checksum failed for dir block %llu\n",
 465		     (unsigned long long)bh->b_blocknr);
 466
 467	return rc;
 468}
 469
 470/*
 471 * Validate a directory trailer.
 472 *
 473 * We check the trailer here rather than in ocfs2_validate_dir_block()
 474 * because that function doesn't have the inode to test.
 475 */
 476static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 477{
 478	int rc = 0;
 479	struct ocfs2_dir_block_trailer *trailer;
 480
 481	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 482	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 483		rc = ocfs2_error(dir->i_sb,
 484				 "Invalid dirblock #%llu: signature = %.*s\n",
 485				 (unsigned long long)bh->b_blocknr, 7,
 486				 trailer->db_signature);
 487		goto out;
 488	}
 489	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 490		rc = ocfs2_error(dir->i_sb,
 491				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 492				 (unsigned long long)bh->b_blocknr,
 493				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 494		goto out;
 495	}
 496	if (le64_to_cpu(trailer->db_parent_dinode) !=
 497	    OCFS2_I(dir)->ip_blkno) {
 498		rc = ocfs2_error(dir->i_sb,
 499				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 500				 (unsigned long long)bh->b_blocknr,
 501				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
  502				 (unsigned long long)le64_to_cpu(trailer->db_parent_dinode));
 503		goto out;
 504	}
 505out:
 506	return rc;
 507}
 508
 509/*
 510 * This function forces all errors to -EIO for consistency with its
 511 * predecessor, ocfs2_bread().  We haven't audited what returning the
 512 * real error codes would do to callers.  We log the real codes with
 513 * mlog_errno() before we squash them.
 514 */
 515static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 516				struct buffer_head **bh, int flags)
 517{
 518	int rc = 0;
 519	struct buffer_head *tmp = *bh;
 520
 521	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 522				    ocfs2_validate_dir_block);
 523	if (rc) {
 524		mlog_errno(rc);
 525		goto out;
 526	}
 527
 528	if (!(flags & OCFS2_BH_READAHEAD) &&
 529	    ocfs2_supports_dir_trailer(inode)) {
 530		rc = ocfs2_check_dir_trailer(inode, tmp);
 531		if (rc) {
 532			if (!*bh)
 533				brelse(tmp);
 534			mlog_errno(rc);
 535			goto out;
 536		}
 537	}
 538
 539	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 540	if (!*bh)
 541		*bh = tmp;
 542
 543out:
 544	return rc ? -EIO : 0;
 545}
 546
 547/*
 548 * Read the block at 'phys' which belongs to this directory
 549 * inode. This function does no virtual->physical block translation -
 550 * what's passed in is assumed to be a valid directory block.
 551 */
 552static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 553				       struct buffer_head **bh)
 554{
 555	int ret;
 556	struct buffer_head *tmp = *bh;
 557
 558	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 559			       ocfs2_validate_dir_block);
 560	if (ret) {
 561		mlog_errno(ret);
 562		goto out;
 563	}
 564
 565	if (ocfs2_supports_dir_trailer(dir)) {
 566		ret = ocfs2_check_dir_trailer(dir, tmp);
 567		if (ret) {
 568			if (!*bh)
 569				brelse(tmp);
 570			mlog_errno(ret);
 571			goto out;
 572		}
 573	}
 574
 575	if (!ret && !*bh)
 576		*bh = tmp;
 577out:
 578	return ret;
 579}
 580
 581static int ocfs2_validate_dx_root(struct super_block *sb,
 582				  struct buffer_head *bh)
 583{
 584	int ret;
 585	struct ocfs2_dx_root_block *dx_root;
 586
 587	BUG_ON(!buffer_uptodate(bh));
 588
 589	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 590
 591	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 592	if (ret) {
 593		mlog(ML_ERROR,
 594		     "Checksum failed for dir index root block %llu\n",
 595		     (unsigned long long)bh->b_blocknr);
 596		return ret;
 597	}
 598
 599	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 600		ret = ocfs2_error(sb,
 601				  "Dir Index Root # %llu has bad signature %.*s\n",
 602				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 603				  7, dx_root->dr_signature);
 604	}
 605
 606	return ret;
 607}
 608
 609static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 610			      struct buffer_head **dx_root_bh)
 611{
 612	int ret;
 613	u64 blkno = le64_to_cpu(di->i_dx_root);
 614	struct buffer_head *tmp = *dx_root_bh;
 615
 616	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 617			       ocfs2_validate_dx_root);
 618
 619	/* If ocfs2_read_block() got us a new bh, pass it up. */
 620	if (!ret && !*dx_root_bh)
 621		*dx_root_bh = tmp;
 622
 623	return ret;
 624}
 625
 626static int ocfs2_validate_dx_leaf(struct super_block *sb,
 627				  struct buffer_head *bh)
 628{
 629	int ret;
 630	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 631
 632	BUG_ON(!buffer_uptodate(bh));
 633
 634	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 635	if (ret) {
 636		mlog(ML_ERROR,
 637		     "Checksum failed for dir index leaf block %llu\n",
 638		     (unsigned long long)bh->b_blocknr);
 639		return ret;
 640	}
 641
 642	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 643		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 644				  7, dx_leaf->dl_signature);
 645	}
 646
 647	return ret;
 648}
 649
 650static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 651			      struct buffer_head **dx_leaf_bh)
 652{
 653	int ret;
 654	struct buffer_head *tmp = *dx_leaf_bh;
 655
 656	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 657			       ocfs2_validate_dx_leaf);
 658
 659	/* If ocfs2_read_block() got us a new bh, pass it up. */
 660	if (!ret && !*dx_leaf_bh)
 661		*dx_leaf_bh = tmp;
 662
 663	return ret;
 664}
 665
 666/*
 667 * Read a series of dx_leaf blocks. This expects all buffer_head
 668 * pointers to be NULL on function entry.
 669 */
 670static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 671				struct buffer_head **dx_leaf_bhs)
 672{
 673	int ret;
 674
 675	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 676				ocfs2_validate_dx_leaf);
 677	if (ret)
 678		mlog_errno(ret);
 679
 680	return ret;
 681}
 682
 683static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 684					       struct inode *dir,
 685					       struct ocfs2_dir_entry **res_dir)
 686{
 687	struct super_block *sb;
 688	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 689	struct buffer_head *bh, *ret = NULL;
 690	unsigned long start, block, b;
 691	int ra_max = 0;		/* Number of bh's in the readahead
 692				   buffer, bh_use[] */
 693	int ra_ptr = 0;		/* Current index into readahead
 694				   buffer */
 695	int num = 0;
 696	int nblocks, i, err;
 697
 698	sb = dir->i_sb;
 699
 700	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 701	start = OCFS2_I(dir)->ip_dir_start_lookup;
 702	if (start >= nblocks)
 703		start = 0;
 704	block = start;
 705
 706restart:
 707	do {
 708		/*
 709		 * We deal with the read-ahead logic here.
 710		 */
 711		if (ra_ptr >= ra_max) {
 712			/* Refill the readahead buffer */
 713			ra_ptr = 0;
 714			b = block;
 715			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 716				/*
 717				 * Terminate if we reach the end of the
 718				 * directory and must wrap, or if our
 719				 * search has finished at this block.
 720				 */
 721				if (b >= nblocks || (num && block == start)) {
 722					bh_use[ra_max] = NULL;
 723					break;
 724				}
 725				num++;
 726
 727				bh = NULL;
 728				err = ocfs2_read_dir_block(dir, b++, &bh,
 729							   OCFS2_BH_READAHEAD);
 730				bh_use[ra_max] = bh;
 731			}
 732		}
 733		if ((bh = bh_use[ra_ptr++]) == NULL)
 734			goto next;
 735		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 736			/* read error, skip block & hope for the best.
 737			 * ocfs2_read_dir_block() has released the bh. */
 738			mlog(ML_ERROR, "reading directory %llu, "
 739				    "offset %lu\n",
 740				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 741				    block);
 742			goto next;
 743		}
 744		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 745					  block << sb->s_blocksize_bits,
 746					  bh->b_data, sb->s_blocksize,
 747					  res_dir);
 748		if (i == 1) {
 749			OCFS2_I(dir)->ip_dir_start_lookup = block;
 750			ret = bh;
 751			goto cleanup_and_exit;
 752		} else {
 753			brelse(bh);
 754			if (i < 0)
 755				goto cleanup_and_exit;
 756		}
 757	next:
 758		if (++block >= nblocks)
 759			block = 0;
 760	} while (block != start);
 761
 762	/*
 763	 * If the directory has grown while we were searching, then
 764	 * search the last part of the directory before giving up.
 765	 */
 766	block = nblocks;
 767	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 768	if (block < nblocks) {
 769		start = 0;
 770		goto restart;
 771	}
 772
 773cleanup_and_exit:
 774	/* Clean up the read-ahead blocks */
 775	for (; ra_ptr < ra_max; ra_ptr++)
 776		brelse(bh_use[ra_ptr]);
 777
 778	trace_ocfs2_find_entry_el(ret);
 779	return ret;
 780}
 781
 782static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 783				   struct ocfs2_extent_list *el,
 784				   u32 major_hash,
 785				   u32 *ret_cpos,
 786				   u64 *ret_phys_blkno,
 787				   unsigned int *ret_clen)
 788{
 789	int ret = 0, i, found;
 790	struct buffer_head *eb_bh = NULL;
 791	struct ocfs2_extent_block *eb;
 792	struct ocfs2_extent_rec *rec = NULL;
 793
 794	if (el->l_tree_depth) {
 795		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 796				      &eb_bh);
 797		if (ret) {
 798			mlog_errno(ret);
 799			goto out;
 800		}
 801
 802		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 803		el = &eb->h_list;
 804
 805		if (el->l_tree_depth) {
 806			ret = ocfs2_error(inode->i_sb,
 807					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 808					  inode->i_ino,
 809					  (unsigned long long)eb_bh->b_blocknr);
 810			goto out;
 811		}
 812	}
 813
 814	found = 0;
 815	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 816		rec = &el->l_recs[i];
 817
 818		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 819			found = 1;
 820			break;
 821		}
 822	}
 823
 824	if (!found) {
 825		ret = ocfs2_error(inode->i_sb,
 826				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 827				  inode->i_ino,
 828				  le32_to_cpu(rec->e_cpos),
 829				  ocfs2_rec_clusters(el, rec));
 830		goto out;
 831	}
 832
 833	if (ret_phys_blkno)
 834		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 835	if (ret_cpos)
 836		*ret_cpos = le32_to_cpu(rec->e_cpos);
 837	if (ret_clen)
 838		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 839
 840out:
 841	brelse(eb_bh);
 842	return ret;
 843}
 844
 845/*
  846 * Returns the block index, from the start of the cluster, to which
  847 * this hash belongs.
 848 */
 849static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 850						   u32 minor_hash)
 851{
 852	return minor_hash & osb->osb_dx_mask;
 853}
 854
 855static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 856					  struct ocfs2_dx_hinfo *hinfo)
 857{
 858	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 859}
 860
 861static int ocfs2_dx_dir_lookup(struct inode *inode,
 862			       struct ocfs2_extent_list *el,
 863			       struct ocfs2_dx_hinfo *hinfo,
 864			       u32 *ret_cpos,
 865			       u64 *ret_phys_blkno)
 866{
 867	int ret = 0;
 868	unsigned int cend, uninitialized_var(clen);
 869	u32 uninitialized_var(cpos);
 870	u64 uninitialized_var(blkno);
 871	u32 name_hash = hinfo->major_hash;
 872
 873	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 874				      &clen);
 875	if (ret) {
 876		mlog_errno(ret);
 877		goto out;
 878	}
 879
 880	cend = cpos + clen;
 881	if (name_hash >= cend) {
 882		/* We want the last cluster */
 883		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 884		cpos += clen - 1;
 885	} else {
 886		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 887						  name_hash - cpos);
 888		cpos = name_hash;
 889	}
 890
 891	/*
 892	 * We now have the cluster which should hold our entry. To
 893	 * find the exact block from the start of the cluster to
 894	 * search, we take the lower bits of the hash.
 895	 */
 896	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 897
 898	if (ret_phys_blkno)
 899		*ret_phys_blkno = blkno;
 900	if (ret_cpos)
 901		*ret_cpos = cpos;
 902
 903out:
 904
 905	return ret;
 906}
 907
 908static int ocfs2_dx_dir_search(const char *name, int namelen,
 909			       struct inode *dir,
 910			       struct ocfs2_dx_root_block *dx_root,
 911			       struct ocfs2_dir_lookup_result *res)
 912{
 913	int ret, i, found;
 914	u64 uninitialized_var(phys);
 915	struct buffer_head *dx_leaf_bh = NULL;
 916	struct ocfs2_dx_leaf *dx_leaf;
 917	struct ocfs2_dx_entry *dx_entry = NULL;
 918	struct buffer_head *dir_ent_bh = NULL;
 919	struct ocfs2_dir_entry *dir_ent = NULL;
 920	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 921	struct ocfs2_extent_list *dr_el;
 922	struct ocfs2_dx_entry_list *entry_list;
 923
 924	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 925
 926	if (ocfs2_dx_root_inline(dx_root)) {
 927		entry_list = &dx_root->dr_entries;
 928		goto search;
 929	}
 930
 931	dr_el = &dx_root->dr_list;
 932
 933	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 934	if (ret) {
 935		mlog_errno(ret);
 936		goto out;
 937	}
 938
 939	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 940				  namelen, name, hinfo->major_hash,
 941				  hinfo->minor_hash, (unsigned long long)phys);
 942
 943	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 944	if (ret) {
 945		mlog_errno(ret);
 946		goto out;
 947	}
 948
 949	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 950
 951	trace_ocfs2_dx_dir_search_leaf_info(
 952			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 953			le16_to_cpu(dx_leaf->dl_list.de_count));
 954
 955	entry_list = &dx_leaf->dl_list;
 956
 957search:
 958	/*
 959	 * Empty leaf is legal, so no need to check for that.
 960	 */
 961	found = 0;
 962	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 963		dx_entry = &entry_list->de_entries[i];
 964
 965		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 966		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 967			continue;
 968
 969		/*
 970		 * Search unindexed leaf block now. We're not
 971		 * guaranteed to find anything.
 972		 */
 973		ret = ocfs2_read_dir_block_direct(dir,
 974					  le64_to_cpu(dx_entry->dx_dirent_blk),
 975					  &dir_ent_bh);
 976		if (ret) {
 977			mlog_errno(ret);
 978			goto out;
 979		}
 980
 981		/*
 982		 * XXX: We should check the unindexed block here,
 983		 * before using it.
 984		 */
 985
 986		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 987					      0, dir_ent_bh->b_data,
 988					      dir->i_sb->s_blocksize, &dir_ent);
 989		if (found == 1)
 990			break;
 991
 992		if (found == -1) {
 993			/* This means we found a bad directory entry. */
 994			ret = -EIO;
 995			mlog_errno(ret);
 996			goto out;
 997		}
 998
 999		brelse(dir_ent_bh);
1000		dir_ent_bh = NULL;
1001	}
1002
1003	if (found <= 0) {
1004		ret = -ENOENT;
1005		goto out;
1006	}
1007
1008	res->dl_leaf_bh = dir_ent_bh;
1009	res->dl_entry = dir_ent;
1010	res->dl_dx_leaf_bh = dx_leaf_bh;
1011	res->dl_dx_entry = dx_entry;
1012
1013	ret = 0;
1014out:
1015	if (ret) {
1016		brelse(dx_leaf_bh);
1017		brelse(dir_ent_bh);
1018	}
1019	return ret;
1020}
1021
1022static int ocfs2_find_entry_dx(const char *name, int namelen,
1023			       struct inode *dir,
1024			       struct ocfs2_dir_lookup_result *lookup)
1025{
1026	int ret;
1027	struct buffer_head *di_bh = NULL;
1028	struct ocfs2_dinode *di;
1029	struct buffer_head *dx_root_bh = NULL;
1030	struct ocfs2_dx_root_block *dx_root;
1031
1032	ret = ocfs2_read_inode_block(dir, &di_bh);
1033	if (ret) {
1034		mlog_errno(ret);
1035		goto out;
1036	}
1037
1038	di = (struct ocfs2_dinode *)di_bh->b_data;
1039
1040	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1041	if (ret) {
1042		mlog_errno(ret);
1043		goto out;
1044	}
1045	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1046
1047	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1048	if (ret) {
1049		if (ret != -ENOENT)
1050			mlog_errno(ret);
1051		goto out;
1052	}
1053
1054	lookup->dl_dx_root_bh = dx_root_bh;
1055	dx_root_bh = NULL;
1056out:
1057	brelse(di_bh);
1058	brelse(dx_root_bh);
1059	return ret;
1060}
1061
1062/*
1063 * Try to find an entry of the provided name within 'dir'.
1064 *
1065 * If nothing was found, -ENOENT is returned. Otherwise, zero is
 1066 * returned and the struct 'lookup' will contain information useful to
 1067 * other directory manipulation functions.
 1068 *
 1069 * The caller can NOT assume anything about the contents of the
 1070 * buffer_heads - they are passed back only so that they can be passed
 1071 * into any one of the manipulation functions (add entry, delete
 1072 * entry, etc). As an example, the bh in the extent directory case is a
 1073 * data block, in the inline-data case it actually points to an inode,
 1074 * and in the indexed directory case, multiple buffers are involved.
1075 */
1076int ocfs2_find_entry(const char *name, int namelen,
1077		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1078{
1079	struct buffer_head *bh;
1080	struct ocfs2_dir_entry *res_dir = NULL;
1081
1082	if (ocfs2_dir_indexed(dir))
1083		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1084
1085	/*
1086	 * The unindexed dir code only uses part of the lookup
1087	 * structure, so there's no reason to push it down further
1088	 * than this.
1089	 */
1090	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1091		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1092	else
1093		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
1094
1095	if (bh == NULL)
1096		return -ENOENT;
1097
1098	lookup->dl_leaf_bh = bh;
1099	lookup->dl_entry = res_dir;
1100	return 0;
1101}
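/*
 * A minimal caller sketch (assuming the directory is already locked by
 * the caller, as ocfs2's lookup paths do):
 *
 *	struct ocfs2_dir_lookup_result res = { NULL, };
 *
 *	if (ocfs2_find_entry(name, namelen, dir, &res) == 0) {
 *		u64 blkno = le64_to_cpu(res.dl_entry->inode);
 *		...
 *		ocfs2_free_dir_lookup_result(&res);
 *	}
 */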
1102
1103/*
1104 * Update inode number and type of a previously found directory entry.
1105 */
1106int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1107		       struct ocfs2_dir_lookup_result *res,
1108		       struct inode *new_entry_inode)
1109{
1110	int ret;
1111	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1112	struct ocfs2_dir_entry *de = res->dl_entry;
1113	struct buffer_head *de_bh = res->dl_leaf_bh;
1114
1115	/*
1116	 * The same code works fine for both inline-data and extent
1117	 * based directories, so no need to split this up.  The only
1118	 * difference is the journal_access function.
1119	 */
1120
1121	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1122		access = ocfs2_journal_access_di;
1123
1124	ret = access(handle, INODE_CACHE(dir), de_bh,
1125		     OCFS2_JOURNAL_ACCESS_WRITE);
1126	if (ret) {
1127		mlog_errno(ret);
1128		goto out;
1129	}
1130
1131	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1132	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1133
1134	ocfs2_journal_dirty(handle, de_bh);
1135
1136out:
1137	return ret;
1138}
1139
1140/*
1141 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1142 * previous entry
1143 */
1144static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1145				struct ocfs2_dir_entry *de_del,
1146				struct buffer_head *bh, char *first_de,
1147				unsigned int bytes)
1148{
1149	struct ocfs2_dir_entry *de, *pde;
1150	int i, status = -ENOENT;
1151	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1152
1153	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1154		access = ocfs2_journal_access_di;
1155
1156	i = 0;
1157	pde = NULL;
1158	de = (struct ocfs2_dir_entry *) first_de;
1159	while (i < bytes) {
1160		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
1161			status = -EIO;
1162			mlog_errno(status);
1163			goto bail;
1164		}
1165		if (de == de_del)  {
1166			status = access(handle, INODE_CACHE(dir), bh,
1167					OCFS2_JOURNAL_ACCESS_WRITE);
1168			if (status < 0) {
1169				status = -EIO;
1170				mlog_errno(status);
1171				goto bail;
1172			}
1173			if (pde)
1174				le16_add_cpu(&pde->rec_len,
1175						le16_to_cpu(de->rec_len));
1176			de->inode = 0;
1177			dir->i_version++;
1178			ocfs2_journal_dirty(handle, bh);
1179			goto bail;
1180		}
1181		i += le16_to_cpu(de->rec_len);
1182		pde = de;
1183		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1184	}
1185bail:
1186	return status;
1187}
1188
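/*
 * Return the number of bytes in 'de' that could be reused for a new
 * entry: the whole record if the entry is unused, otherwise the slack
 * beyond what its name actually needs.
 */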
1189static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1190{
1191	unsigned int hole;
1192
1193	if (le64_to_cpu(de->inode) == 0)
1194		hole = le16_to_cpu(de->rec_len);
1195	else
1196		hole = le16_to_cpu(de->rec_len) -
1197			OCFS2_DIR_REC_LEN(de->name_len);
1198
1199	return hole;
1200}
1201
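/*
 * Walk a directory block and return the largest dirent hole found, or
 * zero if no hole is at least OCFS2_DIR_MIN_REC_LEN bytes. Used to
 * maintain db_free_rec_len in the block trailer.
 */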
1202static int ocfs2_find_max_rec_len(struct super_block *sb,
1203				  struct buffer_head *dirblock_bh)
1204{
1205	int size, this_hole, largest_hole = 0;
1206	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1207	struct ocfs2_dir_entry *de;
1208
1209	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1210	size = ocfs2_dir_trailer_blk_off(sb);
1211	limit = start + size;
1212	de_buf = start;
1213	de = (struct ocfs2_dir_entry *)de_buf;
1214	do {
1215		if (de_buf != trailer) {
1216			this_hole = ocfs2_figure_dirent_hole(de);
1217			if (this_hole > largest_hole)
1218				largest_hole = this_hole;
1219		}
1220
1221		de_buf += le16_to_cpu(de->rec_len);
1222		de = (struct ocfs2_dir_entry *)de_buf;
1223	} while (de_buf < limit);
1224
1225	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1226		return largest_hole;
1227	return 0;
1228}
1229
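/*
 * Remove the dx_entry at 'index' by sliding any later entries down one
 * slot and zeroing the last used slot.
 */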
1230static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1231				       int index)
1232{
1233	int num_used = le16_to_cpu(entry_list->de_num_used);
1234
1235	if (num_used == 1 || index == (num_used - 1))
1236		goto clear;
1237
1238	memmove(&entry_list->de_entries[index],
1239		&entry_list->de_entries[index + 1],
1240		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1241clear:
1242	num_used--;
1243	memset(&entry_list->de_entries[num_used], 0,
1244	       sizeof(struct ocfs2_dx_entry));
1245	entry_list->de_num_used = cpu_to_le16(num_used);
1246}
1247
1248static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1249				 struct ocfs2_dir_lookup_result *lookup)
1250{
1251	int ret, index, max_rec_len, add_to_free_list = 0;
1252	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1253	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1254	struct ocfs2_dx_leaf *dx_leaf;
1255	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1256	struct ocfs2_dir_block_trailer *trailer;
1257	struct ocfs2_dx_root_block *dx_root;
1258	struct ocfs2_dx_entry_list *entry_list;
1259
1260	/*
1261	 * This function gets a bit messy because we might have to
1262	 * modify the root block, regardless of whether the indexed
1263	 * entries are stored inline.
1264	 */
1265
1266	/*
1267	 * *Only* set 'entry_list' here, based on where we're looking
1268	 * for the indexed entries. Later, we might still want to
1269	 * journal both blocks, based on free list state.
1270	 */
1271	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1272	if (ocfs2_dx_root_inline(dx_root)) {
1273		entry_list = &dx_root->dr_entries;
1274	} else {
1275		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1276		entry_list = &dx_leaf->dl_list;
1277	}
1278
 1279	/* Neither of these is a disk corruption - that should have
1280	 * been caught by lookup, before we got here. */
1281	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1282	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1283
1284	index = (char *)dx_entry - (char *)entry_list->de_entries;
1285	index /= sizeof(*dx_entry);
1286
1287	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1288		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1289		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1290		     entry_list, dx_entry);
1291		return -EIO;
1292	}
1293
1294	/*
1295	 * We know that removal of this dirent will leave enough room
1296	 * for a new one, so add this block to the free list if it
1297	 * isn't already there.
1298	 */
1299	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1300	if (trailer->db_free_rec_len == 0)
1301		add_to_free_list = 1;
1302
1303	/*
1304	 * Add the block holding our index into the journal before
1305	 * removing the unindexed entry. If we get an error return
1306	 * from __ocfs2_delete_entry(), then it hasn't removed the
1307	 * entry yet. Likewise, successful return means we *must*
1308	 * remove the indexed entry.
1309	 *
1310	 * We're also careful to journal the root tree block here as
1311	 * the entry count needs to be updated. Also, we might be
1312	 * adding to the start of the free list.
1313	 */
1314	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1315				      OCFS2_JOURNAL_ACCESS_WRITE);
1316	if (ret) {
1317		mlog_errno(ret);
1318		goto out;
1319	}
1320
1321	if (!ocfs2_dx_root_inline(dx_root)) {
1322		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1323					      lookup->dl_dx_leaf_bh,
1324					      OCFS2_JOURNAL_ACCESS_WRITE);
1325		if (ret) {
1326			mlog_errno(ret);
1327			goto out;
1328		}
1329	}
1330
1331	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1332				    index);
1333
1334	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1335				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1336	if (ret) {
1337		mlog_errno(ret);
1338		goto out;
1339	}
1340
1341	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1342	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1343	if (add_to_free_list) {
1344		trailer->db_free_next = dx_root->dr_free_blk;
1345		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1346		ocfs2_journal_dirty(handle, dx_root_bh);
1347	}
1348
1349	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1350	ocfs2_journal_dirty(handle, leaf_bh);
1351
1352	le32_add_cpu(&dx_root->dr_num_entries, -1);
1353	ocfs2_journal_dirty(handle, dx_root_bh);
1354
1355	ocfs2_dx_list_remove_entry(entry_list, index);
1356
1357	if (!ocfs2_dx_root_inline(dx_root))
1358		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1359
1360out:
1361	return ret;
1362}
1363
1364static inline int ocfs2_delete_entry_id(handle_t *handle,
1365					struct inode *dir,
1366					struct ocfs2_dir_entry *de_del,
1367					struct buffer_head *bh)
1368{
1369	int ret;
1370	struct buffer_head *di_bh = NULL;
1371	struct ocfs2_dinode *di;
1372	struct ocfs2_inline_data *data;
1373
1374	ret = ocfs2_read_inode_block(dir, &di_bh);
1375	if (ret) {
1376		mlog_errno(ret);
1377		goto out;
1378	}
1379
1380	di = (struct ocfs2_dinode *)di_bh->b_data;
1381	data = &di->id2.i_data;
1382
1383	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1384				   i_size_read(dir));
1385
1386	brelse(di_bh);
1387out:
1388	return ret;
1389}
1390
1391static inline int ocfs2_delete_entry_el(handle_t *handle,
1392					struct inode *dir,
1393					struct ocfs2_dir_entry *de_del,
1394					struct buffer_head *bh)
1395{
1396	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1397				    bh->b_size);
1398}
1399
1400/*
1401 * Delete a directory entry. Hide the details of directory
1402 * implementation from the caller.
1403 */
1404int ocfs2_delete_entry(handle_t *handle,
1405		       struct inode *dir,
1406		       struct ocfs2_dir_lookup_result *res)
1407{
1408	if (ocfs2_dir_indexed(dir))
1409		return ocfs2_delete_entry_dx(handle, dir, res);
1410
1411	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1412		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1413					     res->dl_leaf_bh);
1414
1415	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1416				     res->dl_leaf_bh);
1417}
1418
1419/*
1420 * Check whether 'de' has enough room to hold an entry of
1421 * 'new_rec_len' bytes.
1422 */
1423static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1424					 unsigned int new_rec_len)
1425{
1426	unsigned int de_really_used;
1427
1428	/* Check whether this is an empty record with enough space */
1429	if (le64_to_cpu(de->inode) == 0 &&
1430	    le16_to_cpu(de->rec_len) >= new_rec_len)
1431		return 1;
1432
1433	/*
1434	 * Record might have free space at the end which we can
1435	 * use.
1436	 */
1437	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1438	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1439	    return 1;
1440
1441	return 0;
1442}
1443
1444static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1445					  struct ocfs2_dx_entry *dx_new_entry)
1446{
1447	int i;
1448
1449	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1450	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1451
1452	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1453}
1454
1455static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1456				       struct ocfs2_dx_hinfo *hinfo,
1457				       u64 dirent_blk)
1458{
1459	int i;
1460	struct ocfs2_dx_entry *dx_entry;
1461
1462	i = le16_to_cpu(entry_list->de_num_used);
1463	dx_entry = &entry_list->de_entries[i];
1464
1465	memset(dx_entry, 0, sizeof(*dx_entry));
1466	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1467	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1468	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1469
1470	le16_add_cpu(&entry_list->de_num_used, 1);
1471}
1472
1473static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1474				      struct ocfs2_dx_hinfo *hinfo,
1475				      u64 dirent_blk,
1476				      struct buffer_head *dx_leaf_bh)
1477{
1478	int ret;
1479	struct ocfs2_dx_leaf *dx_leaf;
1480
1481	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1482				      OCFS2_JOURNAL_ACCESS_WRITE);
1483	if (ret) {
1484		mlog_errno(ret);
1485		goto out;
1486	}
1487
1488	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1489	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1490	ocfs2_journal_dirty(handle, dx_leaf_bh);
1491
1492out:
1493	return ret;
1494}
1495
1496static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1497					struct ocfs2_dx_hinfo *hinfo,
1498					u64 dirent_blk,
1499					struct ocfs2_dx_root_block *dx_root)
1500{
1501	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1502}
1503
1504static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1505			       struct ocfs2_dir_lookup_result *lookup)
1506{
1507	int ret = 0;
1508	struct ocfs2_dx_root_block *dx_root;
1509	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1510
1511	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1512				      OCFS2_JOURNAL_ACCESS_WRITE);
1513	if (ret) {
1514		mlog_errno(ret);
1515		goto out;
1516	}
1517
1518	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1519	if (ocfs2_dx_root_inline(dx_root)) {
1520		ocfs2_dx_inline_root_insert(dir, handle,
1521					    &lookup->dl_hinfo,
1522					    lookup->dl_leaf_bh->b_blocknr,
1523					    dx_root);
1524	} else {
1525		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1526						 lookup->dl_leaf_bh->b_blocknr,
1527						 lookup->dl_dx_leaf_bh);
1528		if (ret)
1529			goto out;
1530	}
1531
1532	le32_add_cpu(&dx_root->dr_num_entries, 1);
1533	ocfs2_journal_dirty(handle, dx_root_bh);
1534
1535out:
1536	return ret;
1537}
1538
1539static void ocfs2_remove_block_from_free_list(struct inode *dir,
1540				       handle_t *handle,
1541				       struct ocfs2_dir_lookup_result *lookup)
1542{
1543	struct ocfs2_dir_block_trailer *trailer, *prev;
1544	struct ocfs2_dx_root_block *dx_root;
1545	struct buffer_head *bh;
1546
1547	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1548
1549	if (ocfs2_free_list_at_root(lookup)) {
1550		bh = lookup->dl_dx_root_bh;
1551		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1552		dx_root->dr_free_blk = trailer->db_free_next;
1553	} else {
1554		bh = lookup->dl_prev_leaf_bh;
1555		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1556		prev->db_free_next = trailer->db_free_next;
1557	}
1558
1559	trailer->db_free_rec_len = cpu_to_le16(0);
1560	trailer->db_free_next = cpu_to_le64(0);
1561
1562	ocfs2_journal_dirty(handle, bh);
1563	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1564}
1565
1566/*
1567 * This expects that a journal write has been reserved on
1568 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1569 */
1570static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1571				   struct ocfs2_dir_lookup_result *lookup)
1572{
1573	int max_rec_len;
1574	struct ocfs2_dir_block_trailer *trailer;
1575
1576	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1577	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1578	if (max_rec_len) {
1579		/*
1580		 * There's still room in this block, so no need to remove it
1581		 * from the free list. In this case, we just want to update
1582		 * the rec len accounting.
1583		 */
1584		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1585		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1586		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1587	} else {
1588		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1589	}
1590}
1591
 1592/* We don't always have a dentry for what we want to add, so callers
 1593 * like the orphan dir code can use this instead.
1594 *
1595 * The lookup context must have been filled from
1596 * ocfs2_prepare_dir_for_insert.
1597 */
1598int __ocfs2_add_entry(handle_t *handle,
1599		      struct inode *dir,
1600		      const char *name, int namelen,
1601		      struct inode *inode, u64 blkno,
1602		      struct buffer_head *parent_fe_bh,
1603		      struct ocfs2_dir_lookup_result *lookup)
1604{
1605	unsigned long offset;
1606	unsigned short rec_len;
1607	struct ocfs2_dir_entry *de, *de1;
1608	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1609	struct super_block *sb = dir->i_sb;
1610	int retval;
1611	unsigned int size = sb->s_blocksize;
1612	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1613	char *data_start = insert_bh->b_data;
1614
1615	if (!namelen)
1616		return -EINVAL;
1617
1618	if (ocfs2_dir_indexed(dir)) {
1619		struct buffer_head *bh;
1620
1621		/*
1622		 * An indexed dir may require that we update the free space
1623		 * list. Reserve a write to the previous node in the list so
1624		 * that we don't fail later.
1625		 *
1626		 * XXX: This can be either a dx_root_block, or an unindexed
1627		 * directory tree leaf block.
1628		 */
1629		if (ocfs2_free_list_at_root(lookup)) {
1630			bh = lookup->dl_dx_root_bh;
1631			retval = ocfs2_journal_access_dr(handle,
1632						 INODE_CACHE(dir), bh,
1633						 OCFS2_JOURNAL_ACCESS_WRITE);
1634		} else {
1635			bh = lookup->dl_prev_leaf_bh;
1636			retval = ocfs2_journal_access_db(handle,
1637						 INODE_CACHE(dir), bh,
1638						 OCFS2_JOURNAL_ACCESS_WRITE);
1639		}
1640		if (retval) {
1641			mlog_errno(retval);
1642			return retval;
1643		}
1644	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1645		data_start = di->id2.i_data.id_data;
1646		size = i_size_read(dir);
1647
1648		BUG_ON(insert_bh != parent_fe_bh);
1649	}
1650
1651	rec_len = OCFS2_DIR_REC_LEN(namelen);
1652	offset = 0;
1653	de = (struct ocfs2_dir_entry *) data_start;
1654	while (1) {
1655		BUG_ON((char *)de >= (size + data_start));
1656
1657		/* These checks should've already been passed by the
1658		 * prepare function, but I guess we can leave them
1659		 * here anyway. */
1660		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
1661			retval = -ENOENT;
1662			goto bail;
1663		}
1664		if (ocfs2_match(namelen, name, de)) {
1665			retval = -EEXIST;
1666			goto bail;
1667		}
1668
1669		/* We're guaranteed that we should have space, so we
1670		 * can't possibly have hit the trailer...right? */
1671		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1672				"Hit dir trailer trying to insert %.*s "
1673			        "(namelen %d) into directory %llu.  "
1674				"offset is %lu, trailer offset is %d\n",
1675				namelen, name, namelen,
1676				(unsigned long long)parent_fe_bh->b_blocknr,
1677				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1678
1679		if (ocfs2_dirent_would_fit(de, rec_len)) {
1680			dir->i_mtime = dir->i_ctime = current_time(dir);
1681			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1682			if (retval < 0) {
1683				mlog_errno(retval);
1684				goto bail;
1685			}
1686
1687			if (insert_bh == parent_fe_bh)
1688				retval = ocfs2_journal_access_di(handle,
1689								 INODE_CACHE(dir),
1690								 insert_bh,
1691								 OCFS2_JOURNAL_ACCESS_WRITE);
1692			else {
1693				retval = ocfs2_journal_access_db(handle,
1694								 INODE_CACHE(dir),
1695								 insert_bh,
1696					      OCFS2_JOURNAL_ACCESS_WRITE);
1697
1698				if (!retval && ocfs2_dir_indexed(dir))
1699					retval = ocfs2_dx_dir_insert(dir,
1700								handle,
1701								lookup);
1702			}
1703
1704			if (retval) {
1705				mlog_errno(retval);
1706				goto bail;
1707			}
1708
1709			/* By now the buffer is marked for journaling */
1710			offset += le16_to_cpu(de->rec_len);
1711			if (le64_to_cpu(de->inode)) {
1712				de1 = (struct ocfs2_dir_entry *)((char *) de +
1713					OCFS2_DIR_REC_LEN(de->name_len));
1714				de1->rec_len =
1715					cpu_to_le16(le16_to_cpu(de->rec_len) -
1716					OCFS2_DIR_REC_LEN(de->name_len));
1717				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1718				de = de1;
1719			}
1720			de->file_type = OCFS2_FT_UNKNOWN;
1721			if (blkno) {
1722				de->inode = cpu_to_le64(blkno);
1723				ocfs2_set_de_type(de, inode->i_mode);
1724			} else
1725				de->inode = 0;
1726			de->name_len = namelen;
1727			memcpy(de->name, name, namelen);
1728
1729			if (ocfs2_dir_indexed(dir))
1730				ocfs2_recalc_free_list(dir, handle, lookup);
1731
1732			dir->i_version++;
1733			ocfs2_journal_dirty(handle, insert_bh);
1734			retval = 0;
1735			goto bail;
1736		}
1737
1738		offset += le16_to_cpu(de->rec_len);
1739		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1740	}
1741
1742	/* when you think about it, the assert above should prevent us
1743	 * from ever getting here. */
1744	retval = -ENOSPC;
1745bail:
1746	if (retval)
1747		mlog_errno(retval);
1748
1749	return retval;
1750}
1751
1752static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1753				    u64 *f_version,
1754				    struct dir_context *ctx)
1755{
1756	int ret, i;
1757	unsigned long offset = ctx->pos;
1758	struct buffer_head *di_bh = NULL;
1759	struct ocfs2_dinode *di;
1760	struct ocfs2_inline_data *data;
1761	struct ocfs2_dir_entry *de;
1762
1763	ret = ocfs2_read_inode_block(inode, &di_bh);
1764	if (ret) {
1765		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1766		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1767		goto out;
1768	}
1769
1770	di = (struct ocfs2_dinode *)di_bh->b_data;
1771	data = &di->id2.i_data;
1772
1773	while (ctx->pos < i_size_read(inode)) {
1774		/* If the dir block has changed since the last call to
1775		 * readdir(2), then we might be pointing to an invalid
1776		 * dirent right now.  Scan from the start of the block
1777		 * to make sure. */
1778		if (*f_version != inode->i_version) {
1779			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1780				de = (struct ocfs2_dir_entry *)
1781					(data->id_data + i);
1782				/* It's too expensive to do a full
1783				 * dirent test each time round this
1784				 * loop, but we do have to test at
1785				 * least that it is non-zero.  A
1786				 * failure will be detected in the
1787				 * dirent test below. */
1788				if (le16_to_cpu(de->rec_len) <
1789				    OCFS2_DIR_REC_LEN(1))
1790					break;
1791				i += le16_to_cpu(de->rec_len);
1792			}
1793			ctx->pos = offset = i;
1794			*f_version = inode->i_version;
1795		}
1796
1797		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1798		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
1799			/* On error, skip the f_pos to the end. */
1800			ctx->pos = i_size_read(inode);
1801			break;
1802		}
1803		offset += le16_to_cpu(de->rec_len);
1804		if (le64_to_cpu(de->inode)) {
1805			unsigned char d_type = DT_UNKNOWN;
1806
1807			if (de->file_type < OCFS2_FT_MAX)
1808				d_type = ocfs2_filetype_table[de->file_type];
1809
1810			if (!dir_emit(ctx, de->name, de->name_len,
1811				      le64_to_cpu(de->inode), d_type))
1812				goto out;
1813		}
1814		ctx->pos += le16_to_cpu(de->rec_len);
1815	}
1816out:
1817	brelse(di_bh);
1818	return 0;
1819}
1820
1821/*
1822 * NOTE: This function can be called against unindexed directories,
1823 * and indexed ones.
1824 */
1825static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1826				    u64 *f_version,
1827				    struct dir_context *ctx,
1828				    bool persist)
1829{
1830	unsigned long offset, blk, last_ra_blk = 0;
1831	int i;
1832	struct buffer_head * bh, * tmp;
1833	struct ocfs2_dir_entry * de;
1834	struct super_block * sb = inode->i_sb;
1835	unsigned int ra_sectors = 16;
1836	int stored = 0;
1837
1838	bh = NULL;
1839
1840	offset = ctx->pos & (sb->s_blocksize - 1);
1841
1842	while (ctx->pos < i_size_read(inode)) {
1843		blk = ctx->pos >> sb->s_blocksize_bits;
1844		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1845			/* Skip the corrupt dirblock and keep trying */
1846			ctx->pos += sb->s_blocksize - offset;
1847			continue;
1848		}
1849
1850		/* The idea here is to begin with 8k read-ahead and to stay
1851		 * 4k ahead of our current position.
1852		 *
1853		 * TODO: Use the pagecache for this. We just need to
1854		 * make sure it's cluster-safe... */
1855		if (!last_ra_blk
1856		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1857			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1858			     i > 0; i--) {
1859				tmp = NULL;
1860				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1861							  OCFS2_BH_READAHEAD))
1862					brelse(tmp);
1863			}
1864			last_ra_blk = blk;
1865			ra_sectors = 8;
1866		}
1867
1868		/* If the dir block has changed since the last call to
1869		 * readdir(2), then we might be pointing to an invalid
1870		 * dirent right now.  Scan from the start of the block
1871		 * to make sure. */
1872		if (*f_version != inode->i_version) {
1873			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1874				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1875				/* It's too expensive to do a full
1876				 * dirent test each time round this
1877				 * loop, but we do have to test at
1878				 * least that it is non-zero.  A
1879				 * failure will be detected in the
1880				 * dirent test below. */
1881				if (le16_to_cpu(de->rec_len) <
1882				    OCFS2_DIR_REC_LEN(1))
1883					break;
1884				i += le16_to_cpu(de->rec_len);
1885			}
1886			offset = i;
1887			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1888				| offset;
1889			*f_version = inode->i_version;
1890		}
1891
1892		while (ctx->pos < i_size_read(inode)
1893		       && offset < sb->s_blocksize) {
1894			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1895			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
1896				/* On error, skip the f_pos to the
1897				   next block. */
1898				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1899				brelse(bh);
1900				continue;
1901			}
1902			if (le64_to_cpu(de->inode)) {
1903				unsigned char d_type = DT_UNKNOWN;
1904
1905				if (de->file_type < OCFS2_FT_MAX)
1906					d_type = ocfs2_filetype_table[de->file_type];
1907				if (!dir_emit(ctx, de->name,
1908						de->name_len,
1909						le64_to_cpu(de->inode),
1910						d_type)) {
1911					brelse(bh);
1912					return 0;
1913				}
1914				stored++;
1915			}
1916			offset += le16_to_cpu(de->rec_len);
1917			ctx->pos += le16_to_cpu(de->rec_len);
1918		}
1919		offset = 0;
1920		brelse(bh);
1921		bh = NULL;
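		/*
		 * Callers which don't need a full walk (readdir) pass
		 * persist == false and can stop once at least one entry
		 * has been emitted from this block; ocfs2_dir_foreach()
		 * passes true to scan the entire directory.
		 */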
1922		if (!persist && stored)
1923			break;
1924	}
1925	return 0;
1926}
1927
1928static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1929				 struct dir_context *ctx,
1930				 bool persist)
1931{
1932	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1933		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1934	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1935}
1936
1937/*
1938 * This is intended to be called from inside other kernel functions,
1939 * so we fake some arguments.
1940 */
1941int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1942{
1943	u64 version = inode->i_version;
1944	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1945	return 0;
1946}
1947
1948/*
1949 * ocfs2_readdir()
1950 *
1951 */
1952int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1953{
1954	int error = 0;
1955	struct inode *inode = file_inode(file);
1956	int lock_level = 0;
1957
1958	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1959
1960	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
1961	if (lock_level && error >= 0) {
1962		/* We release the EX lock which was used to update atime
1963		 * and take a PR lock again to reduce contention
1964		 * on commonly accessed directories. */
1965		ocfs2_inode_unlock(inode, 1);
1966		lock_level = 0;
1967		error = ocfs2_inode_lock(inode, NULL, 0);
1968	}
1969	if (error < 0) {
1970		if (error != -ENOENT)
1971			mlog_errno(error);
1972		/* we haven't got any yet, so propagate the error. */
1973		goto bail_nolock;
1974	}
1975
1976	error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
1977
1978	ocfs2_inode_unlock(inode, lock_level);
1979	if (error)
1980		mlog_errno(error);
1981
1982bail_nolock:
1983
1984	return error;
1985}
1986
1987/*
1988 * NOTE: this should always be called with parent dir i_mutex taken.
1989 */
1990int ocfs2_find_files_on_disk(const char *name,
1991			     int namelen,
1992			     u64 *blkno,
1993			     struct inode *inode,
1994			     struct ocfs2_dir_lookup_result *lookup)
1995{
1996	int status = -ENOENT;
1997
1998	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1999				(unsigned long long)OCFS2_I(inode)->ip_blkno);
2000
2001	status = ocfs2_find_entry(name, namelen, inode, lookup);
2002	if (status)
2003		goto leave;
2004
2005	*blkno = le64_to_cpu(lookup->dl_entry->inode);
2006
2007	status = 0;
2008leave:
2009
2010	return status;
2011}
2012
2013/*
2014 * Convenience function for callers which just want the block number
2015 * mapped to a name and don't require the full dirent info, etc.
2016 */
2017int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
2018			       int namelen, u64 *blkno)
2019{
2020	int ret;
2021	struct ocfs2_dir_lookup_result lookup = { NULL, };
2022
2023	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
2024	ocfs2_free_dir_lookup_result(&lookup);
2025
2026	return ret;
2027}
2028
2029/* Check for a name within a directory.
2030 *
2031 * Return 0 if the name does not exist
2032 * Return -EEXIST if the directory contains the name
2033 *
2034 * Callers should have i_mutex + a cluster lock on dir
2035 */
2036int ocfs2_check_dir_for_entry(struct inode *dir,
2037			      const char *name,
2038			      int namelen)
2039{
2040	int ret = 0;
2041	struct ocfs2_dir_lookup_result lookup = { NULL, };
2042
2043	trace_ocfs2_check_dir_for_entry(
2044		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2045
2046	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
2047		ret = -EEXIST;
2048		mlog_errno(ret);
2049	}
2050
2051	ocfs2_free_dir_lookup_result(&lookup);
2052
2053	return ret;
2054}
2055
2056struct ocfs2_empty_dir_priv {
2057	struct dir_context ctx;
2058	unsigned seen_dot;
2059	unsigned seen_dot_dot;
2060	unsigned seen_other;
2061	unsigned dx_dir;
2062};
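/*
 * The actor below returns nonzero to terminate the directory walk
 * early: as soon as anything other than "." or ".." is seen, the
 * directory is known to be non-empty and further scanning is
 * pointless.
 */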
2063static int ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2064				   int name_len, loff_t pos, u64 ino,
2065				   unsigned type)
2066{
2067	struct ocfs2_empty_dir_priv *p =
2068		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2069
2070	/*
2071	 * Check the positions of "." and ".." records to be sure
2072	 * they're in the correct place.
2073	 *
2074	 * Indexed directories don't need to proceed past the first
2075	 * two entries, so we end the scan after seeing '..'. Despite
2076	 * that, we allow the scan to proceed in the event that we
2077	 * have a corrupted indexed directory (no dot or dot dot
2078	 * entries). This allows us to double check for existing
2079	 * entries which might not have been found in the index.
2080	 */
2081	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2082		p->seen_dot = 1;
2083		return 0;
2084	}
2085
2086	if (name_len == 2 && !strncmp("..", name, 2) &&
2087	    pos == OCFS2_DIR_REC_LEN(1)) {
2088		p->seen_dot_dot = 1;
2089
2090		if (p->dx_dir && p->seen_dot)
2091			return 1;
2092
2093		return 0;
2094	}
2095
2096	p->seen_other = 1;
2097	return 1;
2098}
2099
2100static int ocfs2_empty_dir_dx(struct inode *inode,
2101			      struct ocfs2_empty_dir_priv *priv)
2102{
2103	int ret;
2104	struct buffer_head *di_bh = NULL;
2105	struct buffer_head *dx_root_bh = NULL;
2106	struct ocfs2_dinode *di;
2107	struct ocfs2_dx_root_block *dx_root;
2108
2109	priv->dx_dir = 1;
2110
2111	ret = ocfs2_read_inode_block(inode, &di_bh);
2112	if (ret) {
2113		mlog_errno(ret);
2114		goto out;
2115	}
2116	di = (struct ocfs2_dinode *)di_bh->b_data;
2117
2118	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2119	if (ret) {
2120		mlog_errno(ret);
2121		goto out;
2122	}
2123	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2124
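	/* An empty indexed directory indexes exactly two entries, "."
	 * and ".."; any other count is treated as non-empty. */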
2125	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2126		priv->seen_other = 1;
2127
2128out:
2129	brelse(di_bh);
2130	brelse(dx_root_bh);
2131	return ret;
2132}
2133
2134/*
2135 * routine to check that the specified directory is empty (for rmdir)
2136 *
2137 * Returns 1 if dir is empty, zero otherwise.
2138 *
2139 * XXX: This is a performance problem for unindexed directories.
2140 */
2141int ocfs2_empty_dir(struct inode *inode)
2142{
2143	int ret;
2144	struct ocfs2_empty_dir_priv priv = {
2145		.ctx.actor = ocfs2_empty_dir_filldir,
2146	};
2147
2148	if (ocfs2_dir_indexed(inode)) {
2149		ret = ocfs2_empty_dir_dx(inode, &priv);
2150		if (ret)
2151			mlog_errno(ret);
2152		/*
2153		 * We still run ocfs2_dir_foreach to get the checks
2154		 * for "." and "..".
2155		 */
2156	}
2157
2158	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2159	if (ret)
2160		mlog_errno(ret);
2161
2162	if (!priv.seen_dot || !priv.seen_dot_dot) {
2163		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2164		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2165		/*
2166		 * XXX: Is it really safe to allow an unlink to continue?
2167		 */
2168		return 1;
2169	}
2170
2171	return !priv.seen_other;
2172}
2173
2174/*
2175 * Fills "." and ".." dirents in a new directory block. Returns dirent for
2176 * "..", which might be used during creation of a directory with a trailing
2177 * header. It is otherwise safe to ignore the return code.
2178 */
2179static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2180							  struct inode *parent,
2181							  char *start,
2182							  unsigned int size)
2183{
2184	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2185
2186	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2187	de->name_len = 1;
2188	de->rec_len =
2189		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2190	strcpy(de->name, ".");
2191	ocfs2_set_de_type(de, S_IFDIR);
2192
2193	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2194	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2195	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2196	de->name_len = 2;
2197	strcpy(de->name, "..");
2198	ocfs2_set_de_type(de, S_IFDIR);
2199
2200	return de;
2201}
2202
2203/*
2204 * This works together with code in ocfs2_mknod_locked() which sets
2205 * the inline-data flag and initializes the inline-data section.
2206 */
2207static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2208				 handle_t *handle,
2209				 struct inode *parent,
2210				 struct inode *inode,
2211				 struct buffer_head *di_bh)
2212{
2213	int ret;
2214	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2215	struct ocfs2_inline_data *data = &di->id2.i_data;
2216	unsigned int size = le16_to_cpu(data->id_count);
2217
2218	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2219				      OCFS2_JOURNAL_ACCESS_WRITE);
2220	if (ret) {
2221		mlog_errno(ret);
2222		goto out;
2223	}
2224
2225	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2226	ocfs2_journal_dirty(handle, di_bh);
2227
2228	i_size_write(inode, size);
2229	set_nlink(inode, 2);
2230	inode->i_blocks = ocfs2_inode_sector_count(inode);
2231
2232	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2233	if (ret < 0)
2234		mlog_errno(ret);
2235
2236out:
2237	return ret;
2238}
2239
2240static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2241				 handle_t *handle,
2242				 struct inode *parent,
2243				 struct inode *inode,
2244				 struct buffer_head *fe_bh,
2245				 struct ocfs2_alloc_context *data_ac,
2246				 struct buffer_head **ret_new_bh)
2247{
2248	int status;
2249	unsigned int size = osb->sb->s_blocksize;
2250	struct buffer_head *new_bh = NULL;
2251	struct ocfs2_dir_entry *de;
2252
2253	if (ocfs2_new_dir_wants_trailer(inode))
2254		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2255
2256	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2257				     data_ac, NULL, &new_bh);
2258	if (status < 0) {
2259		mlog_errno(status);
2260		goto bail;
2261	}
2262
2263	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2264
2265	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2266					 OCFS2_JOURNAL_ACCESS_CREATE);
2267	if (status < 0) {
2268		mlog_errno(status);
2269		goto bail;
2270	}
2271	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2272
2273	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2274	if (ocfs2_new_dir_wants_trailer(inode)) {
2275		int size = le16_to_cpu(de->rec_len);
2276
2277		/*
2278		 * Figure out the size of the hole left over after
2279		 * insertion of '.' and '..'. The trailer wants this
2280		 * information.
2281		 */
2282		size -= OCFS2_DIR_REC_LEN(2);
2283		size -= sizeof(struct ocfs2_dir_block_trailer);
2284
2285		ocfs2_init_dir_trailer(inode, new_bh, size);
2286	}
2287
2288	ocfs2_journal_dirty(handle, new_bh);
2289
2290	i_size_write(inode, inode->i_sb->s_blocksize);
2291	set_nlink(inode, 2);
2292	inode->i_blocks = ocfs2_inode_sector_count(inode);
2293	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2294	if (status < 0) {
2295		mlog_errno(status);
2296		goto bail;
2297	}
2298
2299	status = 0;
2300	if (ret_new_bh) {
2301		*ret_new_bh = new_bh;
2302		new_bh = NULL;
2303	}
2304bail:
2305	brelse(new_bh);
2306
2307	return status;
2308}
2309
2310static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2311				     handle_t *handle, struct inode *dir,
2312				     struct buffer_head *di_bh,
2313				     struct buffer_head *dirdata_bh,
2314				     struct ocfs2_alloc_context *meta_ac,
2315				     int dx_inline, u32 num_entries,
2316				     struct buffer_head **ret_dx_root_bh)
2317{
2318	int ret;
2319	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2320	u16 dr_suballoc_bit;
2321	u64 suballoc_loc, dr_blkno;
2322	unsigned int num_bits;
2323	struct buffer_head *dx_root_bh = NULL;
2324	struct ocfs2_dx_root_block *dx_root;
2325	struct ocfs2_dir_block_trailer *trailer =
2326		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2327
2328	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2329				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2330	if (ret) {
2331		mlog_errno(ret);
2332		goto out;
2333	}
2334
2335	trace_ocfs2_dx_dir_attach_index(
2336				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2337				(unsigned long long)dr_blkno);
2338
2339	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2340	if (dx_root_bh == NULL) {
2341		ret = -ENOMEM;
2342		goto out;
2343	}
2344	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2345
2346	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2347				      OCFS2_JOURNAL_ACCESS_CREATE);
2348	if (ret < 0) {
2349		mlog_errno(ret);
2350		goto out;
2351	}
2352
2353	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2354	memset(dx_root, 0, osb->sb->s_blocksize);
2355	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2356	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2357	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2358	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2359	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2360	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2361	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2362	dx_root->dr_num_entries = cpu_to_le32(num_entries);
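	/* Seed the free-space list with this dirent block if its trailer
	 * says it can still take more entries. */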
2363	if (le16_to_cpu(trailer->db_free_rec_len))
2364		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2365	else
2366		dx_root->dr_free_blk = cpu_to_le64(0);
2367
2368	if (dx_inline) {
2369		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2370		dx_root->dr_entries.de_count =
2371			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2372	} else {
2373		dx_root->dr_list.l_count =
2374			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2375	}
2376	ocfs2_journal_dirty(handle, dx_root_bh);
2377
2378	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2379				      OCFS2_JOURNAL_ACCESS_CREATE);
2380	if (ret) {
2381		mlog_errno(ret);
2382		goto out;
2383	}
2384
2385	di->i_dx_root = cpu_to_le64(dr_blkno);
2386
2387	spin_lock(&OCFS2_I(dir)->ip_lock);
2388	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2389	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2390	spin_unlock(&OCFS2_I(dir)->ip_lock);
2391
2392	ocfs2_journal_dirty(handle, di_bh);
2393
2394	*ret_dx_root_bh = dx_root_bh;
2395	dx_root_bh = NULL;
2396
2397out:
2398	brelse(dx_root_bh);
2399	return ret;
2400}
2401
2402static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2403				       handle_t *handle, struct inode *dir,
2404				       struct buffer_head **dx_leaves,
2405				       int num_dx_leaves, u64 start_blk)
2406{
2407	int ret, i;
2408	struct ocfs2_dx_leaf *dx_leaf;
2409	struct buffer_head *bh;
2410
2411	for (i = 0; i < num_dx_leaves; i++) {
2412		bh = sb_getblk(osb->sb, start_blk + i);
2413		if (bh == NULL) {
2414			ret = -ENOMEM;
2415			goto out;
2416		}
2417		dx_leaves[i] = bh;
2418
2419		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2420
2421		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2422					      OCFS2_JOURNAL_ACCESS_CREATE);
2423		if (ret < 0) {
2424			mlog_errno(ret);
2425			goto out;
2426		}
2427
2428		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2429
2430		memset(dx_leaf, 0, osb->sb->s_blocksize);
2431		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2432		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2433		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2434		dx_leaf->dl_list.de_count =
2435			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2436
2437		trace_ocfs2_dx_dir_format_cluster(
2438				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2439				(unsigned long long)bh->b_blocknr,
2440				le16_to_cpu(dx_leaf->dl_list.de_count));
2441
2442		ocfs2_journal_dirty(handle, bh);
2443	}
2444
2445	ret = 0;
2446out:
2447	return ret;
2448}
2449
2450/*
2451 * Allocates and formats a new cluster for use in an indexed dir
2452 * leaf. This version will not do the extent insert, so that it can be
2453 * used by operations which need careful ordering.
2454 */
2455static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2456				      u32 cpos, handle_t *handle,
2457				      struct ocfs2_alloc_context *data_ac,
2458				      struct buffer_head **dx_leaves,
2459				      int num_dx_leaves, u64 *ret_phys_blkno)
2460{
2461	int ret;
2462	u32 phys, num;
2463	u64 phys_blkno;
2464	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2465
2466	/*
2467	 * XXX: For create, this should claim cluster for the index
2468	 * *before* the unindexed insert so that we have a better
2469	 * chance of contiguousness as the directory grows in number
2470	 * of entries.
2471	 */
2472	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2473	if (ret) {
2474		mlog_errno(ret);
2475		goto out;
2476	}
2477
2478	/*
2479	 * Format the new cluster first. That way, we're inserting
2480	 * valid data.
2481	 */
2482	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2483	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2484					  num_dx_leaves, phys_blkno);
2485	if (ret) {
2486		mlog_errno(ret);
2487		goto out;
2488	}
2489
2490	*ret_phys_blkno = phys_blkno;
2491out:
2492	return ret;
2493}
2494
2495static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2496				    struct ocfs2_extent_tree *et,
2497				    u32 cpos, handle_t *handle,
2498				    struct ocfs2_alloc_context *data_ac,
2499				    struct ocfs2_alloc_context *meta_ac,
2500				    struct buffer_head **dx_leaves,
2501				    int num_dx_leaves)
2502{
2503	int ret;
2504	u64 phys_blkno;
2505
2506	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2507					 num_dx_leaves, &phys_blkno);
2508	if (ret) {
2509		mlog_errno(ret);
2510		goto out;
2511	}
2512
2513	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2514				  meta_ac);
2515	if (ret)
2516		mlog_errno(ret);
2517out:
2518	return ret;
2519}
2520
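/*
 * An indexed-directory leaf occupies a full cluster, carved into
 * blocksize-sized ocfs2_dx_leaf blocks, so we need one buffer_head
 * slot per block in a cluster.
 */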
2521static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2522							int *ret_num_leaves)
2523{
2524	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2525	struct buffer_head **dx_leaves;
2526
2527	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2528			    GFP_NOFS);
2529	if (dx_leaves && ret_num_leaves)
2530		*ret_num_leaves = num_dx_leaves;
2531
2532	return dx_leaves;
2533}
2534
2535static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2536				 handle_t *handle,
2537				 struct inode *parent,
2538				 struct inode *inode,
2539				 struct buffer_head *di_bh,
2540				 struct ocfs2_alloc_context *data_ac,
2541				 struct ocfs2_alloc_context *meta_ac)
2542{
2543	int ret;
2544	struct buffer_head *leaf_bh = NULL;
2545	struct buffer_head *dx_root_bh = NULL;
2546	struct ocfs2_dx_hinfo hinfo;
2547	struct ocfs2_dx_root_block *dx_root;
2548	struct ocfs2_dx_entry_list *entry_list;
2549
2550	/*
2551	 * Our strategy is to create the directory as though it were
2552	 * unindexed, then add the index block. This works with very
2553	 * little complication since the state of a new directory is a
2554	 * very well known quantity.
2555	 *
2556	 * Essentially, we have two dirents ("." and ".."), in the 1st
2557	 * block which need indexing. These are easily inserted into
2558	 * the index block.
2559	 */
2560
2561	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2562				    data_ac, &leaf_bh);
2563	if (ret) {
2564		mlog_errno(ret);
2565		goto out;
2566	}
2567
2568	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2569					meta_ac, 1, 2, &dx_root_bh);
2570	if (ret) {
2571		mlog_errno(ret);
2572		goto out;
2573	}
2574	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2575	entry_list = &dx_root->dr_entries;
2576
2577	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2578	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2579	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2580
2581	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2582	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2583
2584out:
2585	brelse(dx_root_bh);
2586	brelse(leaf_bh);
2587	return ret;
2588}
2589
2590int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2591		       handle_t *handle,
2592		       struct inode *parent,
2593		       struct inode *inode,
2594		       struct buffer_head *fe_bh,
2595		       struct ocfs2_alloc_context *data_ac,
2596		       struct ocfs2_alloc_context *meta_ac)
2597
2598{
2599	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2600
2601	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2602		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2603
2604	if (ocfs2_supports_indexed_dirs(osb))
2605		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2606					     data_ac, meta_ac);
2607
2608	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2609				     data_ac, NULL);
2610}
2611
2612static int ocfs2_dx_dir_index_block(struct inode *dir,
2613				    handle_t *handle,
2614				    struct buffer_head **dx_leaves,
2615				    int num_dx_leaves,
2616				    u32 *num_dx_entries,
2617				    struct buffer_head *dirent_bh)
2618{
2619	int ret = 0, namelen, i;
2620	char *de_buf, *limit;
2621	struct ocfs2_dir_entry *de;
2622	struct buffer_head *dx_leaf_bh;
2623	struct ocfs2_dx_hinfo hinfo;
2624	u64 dirent_blk = dirent_bh->b_blocknr;
2625
2626	de_buf = dirent_bh->b_data;
2627	limit = de_buf + dir->i_sb->s_blocksize;
2628
2629	while (de_buf < limit) {
2630		de = (struct ocfs2_dir_entry *)de_buf;
2631
2632		namelen = de->name_len;
2633		if (!namelen || !de->inode)
2634			goto inc;
2635
2636		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2637
2638		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2639		dx_leaf_bh = dx_leaves[i];
2640
2641		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2642						 dirent_blk, dx_leaf_bh);
2643		if (ret) {
2644			mlog_errno(ret);
2645			goto out;
2646		}
2647
2648		*num_dx_entries = *num_dx_entries + 1;
2649
2650inc:
2651		de_buf += le16_to_cpu(de->rec_len);
2652	}
2653
2654out:
2655	return ret;
2656}
2657
2658/*
2659 * XXX: This expects dx_root_bh to already be part of the transaction.
2660 */
2661static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2662					 struct buffer_head *dx_root_bh,
2663					 struct buffer_head *dirent_bh)
2664{
2665	char *de_buf, *limit;
2666	struct ocfs2_dx_root_block *dx_root;
2667	struct ocfs2_dir_entry *de;
2668	struct ocfs2_dx_hinfo hinfo;
2669	u64 dirent_blk = dirent_bh->b_blocknr;
2670
2671	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2672
2673	de_buf = dirent_bh->b_data;
2674	limit = de_buf + dir->i_sb->s_blocksize;
2675
2676	while (de_buf < limit) {
2677		de = (struct ocfs2_dir_entry *)de_buf;
2678
2679		if (!de->name_len || !de->inode)
2680			goto inc;
2681
2682		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2683
2684		trace_ocfs2_dx_dir_index_root_block(
2685				(unsigned long long)dir->i_ino,
2686				hinfo.major_hash, hinfo.minor_hash,
2687				de->name_len, de->name,
2688				le16_to_cpu(dx_root->dr_entries.de_num_used));
2689
2690		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2691					   dirent_blk);
2692
2693		le32_add_cpu(&dx_root->dr_num_entries, 1);
2694inc:
2695		de_buf += le16_to_cpu(de->rec_len);
2696	}
2697}
2698
2699/*
2700 * Count the number of inline directory entries in di_bh and compare
2701 * them against the number of entries we can hold in an inline dx root
2702 * block.
2703 */
2704static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2705					 struct buffer_head *di_bh)
2706{
2707	int dirent_count = 0;
2708	char *de_buf, *limit;
2709	struct ocfs2_dir_entry *de;
2710	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2711
2712	de_buf = di->id2.i_data.id_data;
2713	limit = de_buf + i_size_read(dir);
2714
2715	while (de_buf < limit) {
2716		de = (struct ocfs2_dir_entry *)de_buf;
2717
2718		if (de->name_len && de->inode)
2719			dirent_count++;
2720
2721		de_buf += le16_to_cpu(de->rec_len);
2722	}
2723
2724	/* We are careful to leave room for one extra record. */
2725	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2726}
2727
2728/*
2729 * Expand rec_len of the rightmost dirent in a directory block so that it
2730 * contains the end of our valid space for dirents. We do this during
2731 * expansion from an inline directory to one with extents. The first dir block
2732 * in that case is taken from the inline data portion of the inode block.
2733 *
2734 * This will also return the largest amount of contiguous space for a dirent
2735 * in the block. That value is *not* necessarily in the last dirent, even after
2736 * expansion. The directory indexing code wants this value for free space
2737 * accounting. We do this here since we're already walking the entire dir
2738 * block.
2739 *
2740 * We add the dir trailer if this filesystem wants it.
2741 */
2742static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2743					     struct inode *dir)
2744{
2745	struct super_block *sb = dir->i_sb;
2746	struct ocfs2_dir_entry *de;
2747	struct ocfs2_dir_entry *prev_de;
2748	char *de_buf, *limit;
2749	unsigned int new_size = sb->s_blocksize;
2750	unsigned int bytes, this_hole;
2751	unsigned int largest_hole = 0;
2752
2753	if (ocfs2_new_dir_wants_trailer(dir))
2754		new_size = ocfs2_dir_trailer_blk_off(sb);
2755
2756	bytes = new_size - old_size;
2757
2758	limit = start + old_size;
2759	de_buf = start;
2760	de = (struct ocfs2_dir_entry *)de_buf;
2761	do {
2762		this_hole = ocfs2_figure_dirent_hole(de);
2763		if (this_hole > largest_hole)
2764			largest_hole = this_hole;
2765
2766		prev_de = de;
2767		de_buf += le16_to_cpu(de->rec_len);
2768		de = (struct ocfs2_dir_entry *)de_buf;
2769	} while (de_buf < limit);
2770
2771	le16_add_cpu(&prev_de->rec_len, bytes);
2772
2773	/* We need to double check this after modification of the final
2774	 * dirent. */
2775	this_hole = ocfs2_figure_dirent_hole(prev_de);
2776	if (this_hole > largest_hole)
2777		largest_hole = this_hole;
2778
2779	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2780		return largest_hole;
2781	return 0;
2782}
2783
2784/*
2785 * We allocate enough clusters to fulfill "blocks_wanted", but set
2786 * i_size to exactly one block. Ocfs2_extend_dir() will handle the
2787 * rest automatically for us.
2788 *
2789 * *first_block_bh is a pointer to the 1st data block allocated to the
2790 *  directory.
2791 */
2792static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2793				   unsigned int blocks_wanted,
2794				   struct ocfs2_dir_lookup_result *lookup,
2795				   struct buffer_head **first_block_bh)
2796{
2797	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2798	struct super_block *sb = dir->i_sb;
2799	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2800		credits = ocfs2_inline_to_extents_credits(sb);
2801	u64 dx_insert_blkno, blkno,
2802		bytes = blocks_wanted << sb->s_blocksize_bits;
2803	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2804	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2805	struct ocfs2_alloc_context *data_ac = NULL;
2806	struct ocfs2_alloc_context *meta_ac = NULL;
2807	struct buffer_head *dirdata_bh = NULL;
2808	struct buffer_head *dx_root_bh = NULL;
2809	struct buffer_head **dx_leaves = NULL;
2810	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2811	handle_t *handle;
2812	struct ocfs2_extent_tree et;
2813	struct ocfs2_extent_tree dx_et;
2814	int did_quota = 0, bytes_allocated = 0;
2815
2816	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2817
2818	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2819	dx_alloc = 0;
2820
2821	down_write(&oi->ip_alloc_sem);
2822
2823	if (ocfs2_supports_indexed_dirs(osb)) {
2824		credits += ocfs2_add_dir_index_credits(sb);
2825
2826		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2827		if (!dx_inline) {
2828			/* Add one more cluster for an index leaf */
2829			dx_alloc++;
2830			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2831								&num_dx_leaves);
2832			if (!dx_leaves) {
2833				ret = -ENOMEM;
2834				mlog_errno(ret);
2835				goto out;
2836			}
2837		}
2838
2839		/* This gets us the dx_root */
2840		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2841		if (ret) {
2842			mlog_errno(ret);
2843			goto out;
2844		}
2845	}
2846
2847	/*
2848	 * We should never need more than 2 clusters for the unindexed
2849	 * tree - maximum dirent size is far less than one block. In
2850	 * fact, the only time we'd need more than one cluster is if
2851	 * blocksize == clustersize and the dirent won't fit in the
2852	 * extra space that the expansion to a single block gives. As
2853	 * of today, that only happens on 4k/4k file systems.
2854	 */
2855	BUG_ON(alloc > 2);
2856
2857	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2858	if (ret) {
2859		mlog_errno(ret);
2860		goto out;
2861	}
2862
2863	/*
2864	 * Prepare for worst case allocation scenario of two separate
2865	 * extents in the unindexed tree.
2866	 */
2867	if (alloc == 2)
2868		credits += OCFS2_SUBALLOC_ALLOC;
2869
2870	handle = ocfs2_start_trans(osb, credits);
2871	if (IS_ERR(handle)) {
2872		ret = PTR_ERR(handle);
2873		mlog_errno(ret);
2874		goto out;
2875	}
2876
2877	ret = dquot_alloc_space_nodirty(dir,
2878		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2879	if (ret)
2880		goto out_commit;
2881	did_quota = 1;
2882
2883	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2884		/*
2885		 * Allocate our index cluster first, to maximize the
2886		 * possibility that unindexed leaves grow
2887		 * contiguously.
2888		 */
2889		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2890						 dx_leaves, num_dx_leaves,
2891						 &dx_insert_blkno);
2892		if (ret) {
2893			mlog_errno(ret);
2894			goto out_commit;
2895		}
2896		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2897	}
2898
2899	/*
2900	 * Try to claim as many clusters as the bitmap can give, though
2901	 * if we only get one now, that's enough to continue. The rest
2902	 * will be claimed after the conversion to extents.
2903	 */
2904	if (ocfs2_dir_resv_allowed(osb))
2905		data_ac->ac_resv = &oi->ip_la_data_resv;
2906	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2907	if (ret) {
2908		mlog_errno(ret);
2909		goto out_commit;
2910	}
2911	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2912
2913	/*
2914	 * Operations are carefully ordered so that we set up the new
2915	 * data block first. The conversion from inline data to
2916	 * extents follows.
2917	 */
2918	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2919	dirdata_bh = sb_getblk(sb, blkno);
2920	if (!dirdata_bh) {
2921		ret = -ENOMEM;
2922		mlog_errno(ret);
2923		goto out_commit;
2924	}
2925
2926	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2927
2928	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2929				      OCFS2_JOURNAL_ACCESS_CREATE);
2930	if (ret) {
2931		mlog_errno(ret);
2932		goto out_commit;
2933	}
2934
2935	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2936	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2937	       sb->s_blocksize - i_size_read(dir));
2938	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2939	if (ocfs2_new_dir_wants_trailer(dir)) {
2940		/*
2941		 * Prepare the dir trailer up front. It will otherwise look
2942		 * like a valid dirent. Even if inserting the index fails
2943		 * (unlikely), then all we'll have done is give the first dir
2944		 * block a small amount of fragmentation.
2945		 */
2946		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2947	}
2948
2949	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2950	ocfs2_journal_dirty(handle, dirdata_bh);
2951
2952	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2953		/*
2954		 * Dx dirs with an external cluster need to do this up
2955		 * front. Inline dx root's get handled later, after
2956		 * we've allocated our root block. We get passed back
2957		 * a total number of items so that dr_num_entries can
2958		 * be correctly set once the dx_root has been
2959		 * allocated.
2960		 */
2961		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2962					       num_dx_leaves, &num_dx_entries,
2963					       dirdata_bh);
2964		if (ret) {
2965			mlog_errno(ret);
2966			goto out_commit;
2967		}
2968	}
2969
2970	/*
2971	 * Set extent, i_size, etc on the directory. After this, the
2972	 * inode should contain the same exact dirents as before and
2973	 * be fully accessible from system calls.
2974	 *
2975	 * We let the later dirent insert modify c/mtime - to the user
2976	 * the data hasn't changed.
2977	 */
2978	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2979				      OCFS2_JOURNAL_ACCESS_CREATE);
2980	if (ret) {
2981		mlog_errno(ret);
2982		goto out_commit;
2983	}
2984
2985	spin_lock(&oi->ip_lock);
2986	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2987	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2988	spin_unlock(&oi->ip_lock);
2989
2990	ocfs2_dinode_new_extent_list(dir, di);
2991
2992	i_size_write(dir, sb->s_blocksize);
2993	dir->i_mtime = dir->i_ctime = current_time(dir);
2994
2995	di->i_size = cpu_to_le64(sb->s_blocksize);
2996	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
2997	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
2998	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2999
3000	/*
3001	 * This should never fail as our extent list is empty and all
3002	 * related blocks have been journaled already.
3003	 */
3004	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
3005				  0, NULL);
3006	if (ret) {
3007		mlog_errno(ret);
3008		goto out_commit;
3009	}
3010
3011	/*
3012	 * Set i_blocks after the extent insert for the most up to
3013	 * date ip_clusters value.
3014	 */
3015	dir->i_blocks = ocfs2_inode_sector_count(dir);
3016
3017	ocfs2_journal_dirty(handle, di_bh);
3018
3019	if (ocfs2_supports_indexed_dirs(osb)) {
3020		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
3021						dirdata_bh, meta_ac, dx_inline,
3022						num_dx_entries, &dx_root_bh);
3023		if (ret) {
3024			mlog_errno(ret);
3025			goto out_commit;
3026		}
3027
3028		if (dx_inline) {
3029			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3030						      dirdata_bh);
3031		} else {
3032			ocfs2_init_dx_root_extent_tree(&dx_et,
3033						       INODE_CACHE(dir),
3034						       dx_root_bh);
3035			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3036						  dx_insert_blkno, 1, 0, NULL);
3037			if (ret)
3038				mlog_errno(ret);
3039		}
3040	}
3041
3042	/*
3043	 * We asked for two clusters, but only got one in the 1st
3044	 * pass. Claim the 2nd cluster as a separate extent.
3045	 */
3046	if (alloc > len) {
3047		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3048					   &len);
3049		if (ret) {
3050			mlog_errno(ret);
3051			goto out_commit;
3052		}
3053		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3054
3055		ret = ocfs2_insert_extent(handle, &et, 1,
3056					  blkno, len, 0, NULL);
3057		if (ret) {
3058			mlog_errno(ret);
3059			goto out_commit;
3060		}
3061		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3062	}
3063
3064	*first_block_bh = dirdata_bh;
3065	dirdata_bh = NULL;
3066	if (ocfs2_supports_indexed_dirs(osb)) {
3067		unsigned int off;
3068
3069		if (!dx_inline) {
3070			/*
3071			 * We need to return the correct block within the
3072			 * cluster which should hold our entry.
3073			 */
3074			off = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb),
3075						    &lookup->dl_hinfo);
3076			get_bh(dx_leaves[off]);
3077			lookup->dl_dx_leaf_bh = dx_leaves[off];
3078		}
3079		lookup->dl_dx_root_bh = dx_root_bh;
3080		dx_root_bh = NULL;
3081	}
3082
3083out_commit:
3084	if (ret < 0 && did_quota)
3085		dquot_free_space_nodirty(dir, bytes_allocated);
3086
3087	ocfs2_commit_trans(osb, handle);
3088
3089out:
3090	up_write(&oi->ip_alloc_sem);
3091	if (data_ac)
3092		ocfs2_free_alloc_context(data_ac);
3093	if (meta_ac)
3094		ocfs2_free_alloc_context(meta_ac);
3095
3096	if (dx_leaves) {
3097		for (i = 0; i < num_dx_leaves; i++)
3098			brelse(dx_leaves[i]);
3099		kfree(dx_leaves);
3100	}
3101
3102	brelse(dirdata_bh);
3103	brelse(dx_root_bh);
3104
3105	return ret;
3106}
3107
3108/* returns a bh of the 1st new block in the allocation. */
3109static int ocfs2_do_extend_dir(struct super_block *sb,
3110			       handle_t *handle,
3111			       struct inode *dir,
3112			       struct buffer_head *parent_fe_bh,
3113			       struct ocfs2_alloc_context *data_ac,
3114			       struct ocfs2_alloc_context *meta_ac,
3115			       struct buffer_head **new_bh)
3116{
3117	int status;
3118	int extend, did_quota = 0;
3119	u64 p_blkno, v_blkno;
3120
3121	spin_lock(&OCFS2_I(dir)->ip_lock);
3122	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3123	spin_unlock(&OCFS2_I(dir)->ip_lock);
3124
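	/*
	 * If i_size has already caught up with the allocated clusters,
	 * we need to claim a new cluster; otherwise the next block is
	 * still covered by the existing allocation and only needs to be
	 * mapped below.
	 */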
3125	if (extend) {
3126		u32 offset = OCFS2_I(dir)->ip_clusters;
3127
3128		status = dquot_alloc_space_nodirty(dir,
3129					ocfs2_clusters_to_bytes(sb, 1));
3130		if (status)
3131			goto bail;
3132		did_quota = 1;
3133
3134		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3135					      1, 0, parent_fe_bh, handle,
3136					      data_ac, meta_ac, NULL);
3137		BUG_ON(status == -EAGAIN);
3138		if (status < 0) {
3139			mlog_errno(status);
3140			goto bail;
3141		}
3142	}
3143
3144	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3145	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3146	if (status < 0) {
3147		mlog_errno(status);
3148		goto bail;
3149	}
3150
3151	*new_bh = sb_getblk(sb, p_blkno);
3152	if (!*new_bh) {
3153		status = -ENOMEM;
3154		mlog_errno(status);
3155		goto bail;
3156	}
3157	status = 0;
3158bail:
3159	if (did_quota && status < 0)
3160		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3161	return status;
3162}
3163
3164/*
3165 * Assumes you already have a cluster lock on the directory.
3166 *
3167 * 'blocks_wanted' is only used if we have an inline directory which
3168 * is to be turned into an extent based one. The size of the dirent to
3169 * insert might be larger than the space gained by growing to just one
3170 * block, so we may have to grow the inode by two blocks in that case.
3171 *
3172 * If the directory is already indexed, dx_root_bh must be provided.
3173 */
3174static int ocfs2_extend_dir(struct ocfs2_super *osb,
3175			    struct inode *dir,
3176			    struct buffer_head *parent_fe_bh,
3177			    unsigned int blocks_wanted,
3178			    struct ocfs2_dir_lookup_result *lookup,
3179			    struct buffer_head **new_de_bh)
3180{
3181	int status = 0;
3182	int credits, num_free_extents, drop_alloc_sem = 0;
3183	loff_t dir_i_size;
3184	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3185	struct ocfs2_extent_list *el = &fe->id2.i_list;
3186	struct ocfs2_alloc_context *data_ac = NULL;
3187	struct ocfs2_alloc_context *meta_ac = NULL;
3188	handle_t *handle = NULL;
3189	struct buffer_head *new_bh = NULL;
3190	struct ocfs2_dir_entry * de;
3191	struct super_block *sb = osb->sb;
3192	struct ocfs2_extent_tree et;
3193	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3194
3195	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3196		/*
3197		 * This would be a code error as an inline directory should
3198		 * never have an index root.
3199		 */
3200		BUG_ON(dx_root_bh);
3201
3202		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3203						 blocks_wanted, lookup,
3204						 &new_bh);
3205		if (status) {
3206			mlog_errno(status);
3207			goto bail;
3208		}
3209
3210		/* Expansion from inline to an indexed directory will
3211		 * have given us this. */
3212		dx_root_bh = lookup->dl_dx_root_bh;
3213
3214		if (blocks_wanted == 1) {
3215			/*
3216			 * If the new dirent will fit inside the space
3217			 * created by pushing out to one block, then
3218			 * we can complete the operation
3219			 * here. Otherwise we have to expand i_size
3220			 * and format the 2nd block below.
3221			 */
3222			BUG_ON(new_bh == NULL);
3223			goto bail_bh;
3224		}
3225
3226		/*
3227		 * Get rid of 'new_bh' - we want to format the 2nd
3228		 * data block and return that instead.
3229		 */
3230		brelse(new_bh);
3231		new_bh = NULL;
3232
3233		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3234		drop_alloc_sem = 1;
3235		dir_i_size = i_size_read(dir);
3236		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3237		goto do_extend;
3238	}
3239
3240	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3241	drop_alloc_sem = 1;
3242	dir_i_size = i_size_read(dir);
3243	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3244			       dir_i_size);
3245
3246	/* dir->i_size is always block aligned. */
3247	spin_lock(&OCFS2_I(dir)->ip_lock);
3248	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3249		spin_unlock(&OCFS2_I(dir)->ip_lock);
3250		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3251					      parent_fe_bh);
3252		num_free_extents = ocfs2_num_free_extents(osb, &et);
3253		if (num_free_extents < 0) {
3254			status = num_free_extents;
3255			mlog_errno(status);
3256			goto bail;
3257		}
3258
3259		if (!num_free_extents) {
3260			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3261			if (status < 0) {
3262				if (status != -ENOSPC)
3263					mlog_errno(status);
3264				goto bail;
3265			}
3266		}
3267
3268		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3269		if (status < 0) {
3270			if (status != -ENOSPC)
3271				mlog_errno(status);
3272			goto bail;
3273		}
3274
3275		if (ocfs2_dir_resv_allowed(osb))
3276			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3277
3278		credits = ocfs2_calc_extend_credits(sb, el);
3279	} else {
3280		spin_unlock(&OCFS2_I(dir)->ip_lock);
3281		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3282	}
3283
3284do_extend:
3285	if (ocfs2_dir_indexed(dir))
3286		credits++; /* For attaching the new dirent block to the
3287			    * dx_root */
3288
3289	handle = ocfs2_start_trans(osb, credits);
3290	if (IS_ERR(handle)) {
3291		status = PTR_ERR(handle);
3292		handle = NULL;
3293		mlog_errno(status);
3294		goto bail;
3295	}
3296
3297	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3298				     data_ac, meta_ac, &new_bh);
3299	if (status < 0) {
3300		mlog_errno(status);
3301		goto bail;
3302	}
3303
3304	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3305
3306	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3307					 OCFS2_JOURNAL_ACCESS_CREATE);
3308	if (status < 0) {
3309		mlog_errno(status);
3310		goto bail;
3311	}
3312	memset(new_bh->b_data, 0, sb->s_blocksize);
3313
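	/*
	 * The new block starts life as a single unused dirent spanning
	 * all usable space - everything before the trailer when one is
	 * in use.
	 */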
3314	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3315	de->inode = 0;
3316	if (ocfs2_supports_dir_trailer(dir)) {
3317		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3318
3319		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3320
3321		if (ocfs2_dir_indexed(dir)) {
3322			status = ocfs2_dx_dir_link_trailer(dir, handle,
3323							   dx_root_bh, new_bh);
3324			if (status) {
3325				mlog_errno(status);
3326				goto bail;
3327			}
3328		}
3329	} else {
3330		de->rec_len = cpu_to_le16(sb->s_blocksize);
3331	}
3332	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3333	ocfs2_journal_dirty(handle, new_bh);
3334
3335	dir_i_size += dir->i_sb->s_blocksize;
3336	i_size_write(dir, dir_i_size);
3337	dir->i_blocks = ocfs2_inode_sector_count(dir);
3338	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3339	if (status < 0) {
3340		mlog_errno(status);
3341		goto bail;
3342	}
3343
3344bail_bh:
3345	*new_de_bh = new_bh;
3346	get_bh(*new_de_bh);
3347bail:
3348	if (handle)
3349		ocfs2_commit_trans(osb, handle);
3350	if (drop_alloc_sem)
3351		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3352
3353	if (data_ac)
3354		ocfs2_free_alloc_context(data_ac);
3355	if (meta_ac)
3356		ocfs2_free_alloc_context(meta_ac);
3357
3358	brelse(new_bh);
3359
3360	return status;
3361}
3362
3363static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3364				   const char *name, int namelen,
3365				   struct buffer_head **ret_de_bh,
3366				   unsigned int *blocks_wanted)
3367{
3368	int ret;
3369	struct super_block *sb = dir->i_sb;
3370	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3371	struct ocfs2_dir_entry *de, *last_de = NULL;
3372	char *de_buf, *limit;
3373	unsigned long offset = 0;
3374	unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize;
3375
3376	/*
3377	 * This calculates how many free bytes we'd have in block zero, should
3378	 * this function force expansion to an extent tree.
3379	 */
3380	if (ocfs2_new_dir_wants_trailer(dir))
3381		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3382	else
3383		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3384
3385	de_buf = di->id2.i_data.id_data;
3386	limit = de_buf + i_size_read(dir);
3387	rec_len = OCFS2_DIR_REC_LEN(namelen);
3388
3389	while (de_buf < limit) {
3390		de = (struct ocfs2_dir_entry *)de_buf;
3391
3392		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
3393			ret = -ENOENT;
3394			goto out;
3395		}
3396		if (ocfs2_match(namelen, name, de)) {
3397			ret = -EEXIST;
3398			goto out;
3399		}
3400		/*
3401		 * No need to check for a trailing dirent record here as
3402		 * they're not used for inline dirs.
3403		 */
3404
3405		if (ocfs2_dirent_would_fit(de, rec_len)) {
3406			/* Ok, we found a spot. Return this bh and let
3407			 * the caller actually fill it in. */
3408			*ret_de_bh = di_bh;
3409			get_bh(*ret_de_bh);
3410			ret = 0;
3411			goto out;
3412		}
3413
3414		last_de = de;
3415		de_buf += le16_to_cpu(de->rec_len);
3416		offset += le16_to_cpu(de->rec_len);
3417	}
3418
3419	/*
3420	 * We're going to require expansion of the directory - figure
3421	 * out how many blocks we'll need so that a place for the
3422	 * dirent can be found.
3423	 */
3424	*blocks_wanted = 1;
3425	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
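	/*
	 * The grown final record must hold both the existing last entry
	 * and the new dirent; if it can't, a second block is needed.
	 */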
3426	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3427		*blocks_wanted = 2;
3428
3429	ret = -ENOSPC;
3430out:
3431	return ret;
3432}
3433
3434static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3435				   int namelen, struct buffer_head **ret_de_bh)
3436{
3437	unsigned long offset;
3438	struct buffer_head *bh = NULL;
3439	unsigned short rec_len;
3440	struct ocfs2_dir_entry *de;
3441	struct super_block *sb = dir->i_sb;
3442	int status;
3443	int blocksize = dir->i_sb->s_blocksize;
3444
3445	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3446	if (status)
3447		goto bail;
3448
3449	rec_len = OCFS2_DIR_REC_LEN(namelen);
3450	offset = 0;
3451	de = (struct ocfs2_dir_entry *) bh->b_data;
3452	while (1) {
3453		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3454			brelse(bh);
3455			bh = NULL;
3456
3457			if (i_size_read(dir) <= offset) {
3458				/*
3459				 * Caller will have to expand this
3460				 * directory.
3461				 */
3462				status = -ENOSPC;
3463				goto bail;
3464			}
3465			status = ocfs2_read_dir_block(dir,
3466					     offset >> sb->s_blocksize_bits,
3467					     &bh, 0);
3468			if (status)
3469				goto bail;
3470
3471			/* move to next block */
3472			de = (struct ocfs2_dir_entry *) bh->b_data;
3473		}
3474		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
3475			status = -ENOENT;
3476			goto bail;
3477		}
3478		if (ocfs2_match(namelen, name, de)) {
3479			status = -EEXIST;
3480			goto bail;
3481		}
3482
3483		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3484					   blocksize))
3485			goto next;
3486
3487		if (ocfs2_dirent_would_fit(de, rec_len)) {
3488			/* Ok, we found a spot. Return this bh and let
3489			 * the caller actually fill it in. */
3490			*ret_de_bh = bh;
3491			get_bh(*ret_de_bh);
3492			status = 0;
3493			goto bail;
3494		}
3495next:
3496		offset += le16_to_cpu(de->rec_len);
3497		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3498	}
3499
3500bail:
3501	brelse(bh);
3502	if (status)
3503		mlog_errno(status);
3504
3505	return status;
3506}
3507
3508static int dx_leaf_sort_cmp(const void *a, const void *b)
3509{
3510	const struct ocfs2_dx_entry *entry1 = a;
3511	const struct ocfs2_dx_entry *entry2 = b;
3512	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3513	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3514	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3515	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3516
3517	if (major_hash1 > major_hash2)
3518		return 1;
3519	if (major_hash1 < major_hash2)
3520		return -1;
3521
3522	/*
3523	 * It is not strictly necessary to sort by minor hash
3524	 */
3525	if (minor_hash1 > minor_hash2)
3526		return 1;
3527	if (minor_hash1 < minor_hash2)
3528		return -1;
3529	return 0;
3530}
3531
3532static void dx_leaf_sort_swap(void *a, void *b, int size)
3533{
3534	struct ocfs2_dx_entry *entry1 = a;
3535	struct ocfs2_dx_entry *entry2 = b;
3536
3537	BUG_ON(size != sizeof(*entry1));
3538
3539	swap(*entry1, *entry2);
3540}
3541
3542static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3543{
3544	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3545	int i, num = le16_to_cpu(dl_list->de_num_used);
3546
3547	for (i = 0; i < (num - 1); i++) {
3548		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3549		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3550			return 0;
3551	}
3552
3553	return 1;
3554}
3555
3556/*
3557 * Find the optimal value to split this leaf on. This expects the leaf
3558 * entries to be in sorted order.
3559 *
3560 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3561 * the hash we want to insert.
3562 *
3563 * This function is only concerned with the major hash - that which
3564 * determines which cluster an item belongs to.
3565 */
3566static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3567					u32 leaf_cpos, u32 insert_hash,
3568					u32 *split_hash)
3569{
3570	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3571	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3572	int allsame;
3573
3574	/*
3575	 * There are a couple of rare, but nasty corner cases we have to
3576	 * check for here. All of them involve a leaf where all values
3577	 * have the same hash, which is what we look for first.
3578	 *
3579	 * Most of the time, all of the above is false, and we simply
3580	 * pick the median value for a split.
3581	 */
3582	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3583	if (allsame) {
3584		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3585
3586		if (val == insert_hash) {
3587			/*
3588			 * No matter where we would choose to split,
3589			 * the new entry would want to occupy the same
3590			 * block as these. Since there's no space left
3591			 * in their existing block, we know there
3592			 * won't be space after the split.
3593			 */
3594			return -ENOSPC;
3595		}
3596
3597		if (val == leaf_cpos) {
3598			/*
3599			 * Because val is the same as leaf_cpos (which
3600			 * is the smallest value this leaf can have),
3601			 * yet is not equal to insert_hash, then we
3602			 * know that insert_hash *must* be larger than
3603			 * val (and leaf_cpos). At least cpos+1 in value.
3604			 *
3605			 * We also know then, that there cannot be an
3606			 * adjacent extent (otherwise we'd be looking
3607			 * at it). Choosing this value gives us a
3608			 * chance to get some contiguousness.
3609			 */
3610			*split_hash = leaf_cpos + 1;
3611			return 0;
3612		}
3613
3614		if (val > insert_hash) {
3615			/*
3616			 * val can not be the same as insert hash, and
3617			 * also must be larger than leaf_cpos. Also,
3618			 * we know that there can't be a leaf between
3619			 * cpos and val, otherwise the entries with
3620			 * hash 'val' would be there.
3621			 */
3622			*split_hash = val;
3623			return 0;
3624		}
3625
3626		*split_hash = insert_hash;
3627		return 0;
3628	}
3629
3630	/*
3631	 * Since the records are sorted and the checks above
3632	 * guaranteed that not all records in this block are the same,
3633	 * we simply travel forward, from the median, and pick the 1st
3634	 * record whose value is larger than leaf_cpos.
3635	 */
3636	for (i = (num_used / 2); i < num_used; i++)
3637		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3638		    leaf_cpos)
3639			break;
3640
3641	BUG_ON(i == num_used); /* Should be impossible */
3642	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3643	return 0;
3644}
3645
3646/*
3647 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3648 * larger than split_hash into new_dx_leaves. We use a temporary
3649 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3650 *
3651 * Since the block offset inside a leaf (cluster) is a constant mask
3652 * of minor_hash, we can optimize - an item at block offset X within
3653 * the original cluster, will be at offset X within the new cluster.
3654 */
3655static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3656				       handle_t *handle,
3657				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3658				       struct buffer_head **orig_dx_leaves,
3659				       struct buffer_head **new_dx_leaves,
3660				       int num_dx_leaves)
3661{
3662	int i, j, num_used;
3663	u32 major_hash;
3664	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3665	struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
3666	struct ocfs2_dx_entry *dx_entry;
3667
3668	tmp_list = &tmp_dx_leaf->dl_list;
3669
3670	for (i = 0; i < num_dx_leaves; i++) {
3671		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3672		orig_list = &orig_dx_leaf->dl_list;
3673		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
3674		new_list = &new_dx_leaf->dl_list;
3675
3676		num_used = le16_to_cpu(orig_list->de_num_used);
3677
3678		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3679		tmp_list->de_num_used = cpu_to_le16(0);
3680		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3681
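		/*
		 * Partition the original entries: hashes at or above
		 * split_hash move to the new leaf, everything else is
		 * rebuilt in tmp_dx_leaf and then copied back over the
		 * original block.
		 */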
3682		for (j = 0; j < num_used; j++) {
3683			dx_entry = &orig_list->de_entries[j];
3684			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3685			if (major_hash >= split_hash)
3686				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3687							      dx_entry);
3688			else
3689				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3690							      dx_entry);
3691		}
3692		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3693
3694		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3695		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3696	}
3697}
3698
3699static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3700					  struct ocfs2_dx_root_block *dx_root)
3701{
3702	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3703
3704	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3705	credits += ocfs2_quota_trans_credits(osb->sb);
3706	return credits;
3707}
3708
3709/*
3710 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3711 * half our entries into.
3712 */
3713static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3714				  struct buffer_head *dx_root_bh,
3715				  struct buffer_head *dx_leaf_bh,
3716				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3717				  u64 leaf_blkno)
3718{
3719	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3720	int credits, ret, i, num_used, did_quota = 0;
3721	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3722	u64 orig_leaves_start;
3723	int num_dx_leaves;
3724	struct buffer_head **orig_dx_leaves = NULL;
3725	struct buffer_head **new_dx_leaves = NULL;
3726	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3727	struct ocfs2_extent_tree et;
3728	handle_t *handle = NULL;
3729	struct ocfs2_dx_root_block *dx_root;
3730	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3731
3732	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3733				     (unsigned long long)leaf_blkno,
3734				     insert_hash);
3735
3736	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3737
3738	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3739	/*
3740	 * XXX: This is a rather large limit. We should use a more
3741	 * realistic value.
3742	 */
3743	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3744		return -ENOSPC;
3745
3746	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3747	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3748		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: "
3749		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3750		     (unsigned long long)leaf_blkno, num_used);
3751		ret = -EIO;
3752		goto out;
3753	}
3754
3755	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3756	if (!orig_dx_leaves) {
3757		ret = -ENOMEM;
3758		mlog_errno(ret);
3759		goto out;
3760	}
3761
3762	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3763	if (!new_dx_leaves) {
3764		ret = -ENOMEM;
3765		mlog_errno(ret);
3766		goto out;
3767	}
3768
3769	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3770	if (ret) {
3771		if (ret != -ENOSPC)
3772			mlog_errno(ret);
3773		goto out;
3774	}
3775
3776	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3777	handle = ocfs2_start_trans(osb, credits);
3778	if (IS_ERR(handle)) {
3779		ret = PTR_ERR(handle);
3780		handle = NULL;
3781		mlog_errno(ret);
3782		goto out;
3783	}
3784
3785	ret = dquot_alloc_space_nodirty(dir,
3786				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3787	if (ret)
3788		goto out_commit;
3789	did_quota = 1;
3790
3791	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3792				      OCFS2_JOURNAL_ACCESS_WRITE);
3793	if (ret) {
3794		mlog_errno(ret);
3795		goto out_commit;
3796	}
3797
3798	/*
3799	 * This block is changing anyway, so we can sort it in place.
3800	 */
3801	sort(dx_leaf->dl_list.de_entries, num_used,
3802	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3803	     dx_leaf_sort_swap);
3804
3805	ocfs2_journal_dirty(handle, dx_leaf_bh);
3806
3807	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3808					   &split_hash);
3809	if (ret) {
3810		mlog_errno(ret);
 3811		goto out_commit;
3812	}
3813
3814	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3815
3816	/*
3817	 * We have to carefully order operations here. There are items
3818	 * which want to be in the new cluster before insert, but in
3819	 * order to put those items in the new cluster, we alter the
3820	 * old cluster. A failure to insert gets nasty.
3821	 *
3822	 * So, start by reserving writes to the old
3823	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3824	 * the new cluster for us, before inserting it. The insert
3825	 * won't happen if there's an error before that. Once the
 3826	 * insert is done, we can transfer from one leaf into the
3827	 * other without fear of hitting any error.
3828	 */
3829
3830	/*
3831	 * The leaf transfer wants some scratch space so that we don't
3832	 * wind up doing a bunch of expensive memmove().
3833	 */
3834	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3835	if (!tmp_dx_leaf) {
3836		ret = -ENOMEM;
3837		mlog_errno(ret);
3838		goto out_commit;
3839	}
3840
3841	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3842	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3843				   orig_dx_leaves);
3844	if (ret) {
3845		mlog_errno(ret);
3846		goto out_commit;
3847	}
3848
3849	cpos = split_hash;
3850	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3851				       data_ac, meta_ac, new_dx_leaves,
3852				       num_dx_leaves);
3853	if (ret) {
3854		mlog_errno(ret);
3855		goto out_commit;
3856	}
3857
3858	for (i = 0; i < num_dx_leaves; i++) {
3859		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3860					      orig_dx_leaves[i],
3861					      OCFS2_JOURNAL_ACCESS_WRITE);
3862		if (ret) {
3863			mlog_errno(ret);
3864			goto out_commit;
3865		}
3866
3867		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3868					      new_dx_leaves[i],
3869					      OCFS2_JOURNAL_ACCESS_WRITE);
3870		if (ret) {
3871			mlog_errno(ret);
3872			goto out_commit;
3873		}
3874	}
3875
3876	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3877				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3878
3879out_commit:
3880	if (ret < 0 && did_quota)
3881		dquot_free_space_nodirty(dir,
3882				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3883
3884	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3885	ocfs2_commit_trans(osb, handle);
3886
3887out:
3888	if (orig_dx_leaves || new_dx_leaves) {
3889		for (i = 0; i < num_dx_leaves; i++) {
3890			if (orig_dx_leaves)
3891				brelse(orig_dx_leaves[i]);
3892			if (new_dx_leaves)
3893				brelse(new_dx_leaves[i]);
3894		}
3895		kfree(orig_dx_leaves);
3896		kfree(new_dx_leaves);
3897	}
3898
3899	if (meta_ac)
3900		ocfs2_free_alloc_context(meta_ac);
3901	if (data_ac)
3902		ocfs2_free_alloc_context(data_ac);
3903
3904	kfree(tmp_dx_leaf);
3905	return ret;
3906}
3907
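/*
 * Find a dx leaf with room for one more hash entry. If the leaf
 * covering this name's hash is full, rebalance once and redo the
 * lookup; a still-full leaf after rebalancing is returned as -ENOSPC.
 * On success lookup->dl_dx_leaf_bh points at the target leaf.
 */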
3908static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3909				   struct buffer_head *di_bh,
3910				   struct buffer_head *dx_root_bh,
3911				   const char *name, int namelen,
3912				   struct ocfs2_dir_lookup_result *lookup)
3913{
3914	int ret, rebalanced = 0;
3915	struct ocfs2_dx_root_block *dx_root;
3916	struct buffer_head *dx_leaf_bh = NULL;
3917	struct ocfs2_dx_leaf *dx_leaf;
3918	u64 blkno;
3919	u32 leaf_cpos;
3920
3921	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3922
3923restart_search:
3924	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3925				  &leaf_cpos, &blkno);
3926	if (ret) {
3927		mlog_errno(ret);
3928		goto out;
3929	}
3930
3931	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3932	if (ret) {
3933		mlog_errno(ret);
3934		goto out;
3935	}
3936
3937	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3938
3939	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3940	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3941		if (rebalanced) {
3942			/*
3943			 * Rebalancing should have provided us with
3944			 * space in an appropriate leaf.
3945			 *
3946			 * XXX: Is this an abnormal condition then?
3947			 * Should we print a message here?
3948			 */
3949			ret = -ENOSPC;
3950			goto out;
3951		}
3952
3953		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3954					     &lookup->dl_hinfo, leaf_cpos,
3955					     blkno);
3956		if (ret) {
3957			if (ret != -ENOSPC)
3958				mlog_errno(ret);
3959			goto out;
3960		}
3961
3962		/*
3963		 * Restart the lookup. The rebalance might have
3964		 * changed which block our item fits into. Mark our
3965		 * progress, so we only execute this once.
3966		 */
3967		brelse(dx_leaf_bh);
3968		dx_leaf_bh = NULL;
3969		rebalanced = 1;
3970		goto restart_search;
3971	}
3972
3973	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3974	dx_leaf_bh = NULL;
3975
3976out:
3977	brelse(dx_leaf_bh);
3978	return ret;
3979}
3980
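/*
 * Walk the list of unindexed blocks with free space (dr_free_blk,
 * chained through db_free_next) looking for one whose recorded free
 * record length can hold a dirent of 'namelen'. The matching block
 * and its predecessor on the list are stored in 'lookup'; -ENOSPC
 * means the list had nothing big enough.
 */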
3981static int ocfs2_search_dx_free_list(struct inode *dir,
3982				     struct buffer_head *dx_root_bh,
3983				     int namelen,
3984				     struct ocfs2_dir_lookup_result *lookup)
3985{
3986	int ret = -ENOSPC;
3987	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3988	struct ocfs2_dir_block_trailer *db;
3989	u64 next_block;
3990	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3991	struct ocfs2_dx_root_block *dx_root;
3992
3993	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3994	next_block = le64_to_cpu(dx_root->dr_free_blk);
3995
3996	while (next_block) {
3997		brelse(prev_leaf_bh);
3998		prev_leaf_bh = leaf_bh;
3999		leaf_bh = NULL;
4000
4001		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
4002		if (ret) {
4003			mlog_errno(ret);
4004			goto out;
4005		}
4006
4007		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
4008		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
4009			lookup->dl_leaf_bh = leaf_bh;
4010			lookup->dl_prev_leaf_bh = prev_leaf_bh;
4011			leaf_bh = NULL;
4012			prev_leaf_bh = NULL;
4013			break;
4014		}
4015
4016		next_block = le64_to_cpu(db->db_free_next);
4017	}
4018
4019	if (!next_block)
4020		ret = -ENOSPC;
4021
4022out:
4023
4024	brelse(leaf_bh);
4025	brelse(prev_leaf_bh);
4026	return ret;
4027}
4028
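/*
 * Convert an inline dx root into a one-cluster index tree: allocate a
 * cluster of dx leaves, copy each inline entry into the leaf selected
 * by its minor hash, then turn dr_list into a real extent list and
 * insert the new cluster at cpos 0.
 */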
4029static int ocfs2_expand_inline_dx_root(struct inode *dir,
4030				       struct buffer_head *dx_root_bh)
4031{
4032	int ret, num_dx_leaves, i, j, did_quota = 0;
4033	struct buffer_head **dx_leaves = NULL;
4034	struct ocfs2_extent_tree et;
4035	u64 insert_blkno;
4036	struct ocfs2_alloc_context *data_ac = NULL;
4037	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4038	handle_t *handle = NULL;
4039	struct ocfs2_dx_root_block *dx_root;
4040	struct ocfs2_dx_entry_list *entry_list;
4041	struct ocfs2_dx_entry *dx_entry;
4042	struct ocfs2_dx_leaf *target_leaf;
4043
4044	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4045	if (ret) {
4046		mlog_errno(ret);
4047		goto out;
4048	}
4049
4050	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4051	if (!dx_leaves) {
4052		ret = -ENOMEM;
4053		mlog_errno(ret);
4054		goto out;
4055	}
4056
4057	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4058	if (IS_ERR(handle)) {
4059		ret = PTR_ERR(handle);
4060		mlog_errno(ret);
4061		goto out;
4062	}
4063
4064	ret = dquot_alloc_space_nodirty(dir,
4065				       ocfs2_clusters_to_bytes(osb->sb, 1));
4066	if (ret)
4067		goto out_commit;
4068	did_quota = 1;
4069
4070	/*
4071	 * We do this up front, before the allocation, so that a
4072	 * failure to add the dx_root_bh to the journal won't result
 4073	 * in us losing clusters.
4074	 */
4075	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4076				      OCFS2_JOURNAL_ACCESS_WRITE);
4077	if (ret) {
4078		mlog_errno(ret);
4079		goto out_commit;
4080	}
4081
4082	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4083					 num_dx_leaves, &insert_blkno);
4084	if (ret) {
4085		mlog_errno(ret);
4086		goto out_commit;
4087	}
4088
4089	/*
4090	 * Transfer the entries from our dx_root into the appropriate
4091	 * block
4092	 */
4093	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4094	entry_list = &dx_root->dr_entries;
4095
4096	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4097		dx_entry = &entry_list->de_entries[i];
4098
4099		j = __ocfs2_dx_dir_hash_idx(osb,
4100					    le32_to_cpu(dx_entry->dx_minor_hash));
4101		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4102
4103		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4104
4105		/* Each leaf has been passed to the journal already
4106		 * via __ocfs2_dx_dir_new_cluster() */
4107	}
4108
4109	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4110	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4111	       offsetof(struct ocfs2_dx_root_block, dr_list));
4112	dx_root->dr_list.l_count =
4113		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4114
4115	/* This should never fail considering we start with an empty
4116	 * dx_root. */
4117	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4118	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4119	if (ret)
4120		mlog_errno(ret);
4121	did_quota = 0;
4122
4123	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4124	ocfs2_journal_dirty(handle, dx_root_bh);
4125
4126out_commit:
4127	if (ret < 0 && did_quota)
4128		dquot_free_space_nodirty(dir,
4129					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4130
4131	ocfs2_commit_trans(osb, handle);
4132
4133out:
4134	if (data_ac)
4135		ocfs2_free_alloc_context(data_ac);
4136
4137	if (dx_leaves) {
4138		for (i = 0; i < num_dx_leaves; i++)
4139			brelse(dx_leaves[i]);
4140		kfree(dx_leaves);
4141	}
4142	return ret;
4143}
4144
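/*
 * Returns 0 if the inline dx root can take another entry, -ENOSPC
 * once de_num_used has reached de_count.
 */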
4145static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4146{
4147	struct ocfs2_dx_root_block *dx_root;
4148	struct ocfs2_dx_entry_list *entry_list;
4149
4150	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4151	entry_list = &dx_root->dr_entries;
4152
4153	if (le16_to_cpu(entry_list->de_num_used) >=
4154	    le16_to_cpu(entry_list->de_count))
4155		return -ENOSPC;
4156
4157	return 0;
4158}
4159
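/*
 * Reserve space for one insert in an indexed directory: make sure the
 * index can take another entry (expanding an inline root if needed),
 * then find an unindexed block with enough room, extending the
 * directory when the free list has nothing large enough.
 */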
4160static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4161					   struct buffer_head *di_bh,
4162					   const char *name,
4163					   int namelen,
4164					   struct ocfs2_dir_lookup_result *lookup)
4165{
4166	int ret, free_dx_root = 1;
4167	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4168	struct buffer_head *dx_root_bh = NULL;
4169	struct buffer_head *leaf_bh = NULL;
4170	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4171	struct ocfs2_dx_root_block *dx_root;
4172
4173	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4174	if (ret) {
4175		mlog_errno(ret);
4176		goto out;
4177	}
4178
4179	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4180	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4181		ret = -ENOSPC;
4182		mlog_errno(ret);
4183		goto out;
4184	}
4185
4186	if (ocfs2_dx_root_inline(dx_root)) {
4187		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4188
4189		if (ret == 0)
4190			goto search_el;
4191
4192		/*
4193		 * We ran out of room in the root block. Expand it to
4194		 * an extent, then allow ocfs2_find_dir_space_dx to do
4195		 * the rest.
4196		 */
4197		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4198		if (ret) {
4199			mlog_errno(ret);
4200			goto out;
4201		}
4202	}
4203
4204	/*
4205	 * Insert preparation for an indexed directory is split into two
4206	 * steps. The call to find_dir_space_dx reserves room in the index for
4207	 * an additional item. If we run out of space there, it's a real error
 4208 * and we can't continue.
4209	 */
4210	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4211				      namelen, lookup);
4212	if (ret) {
4213		mlog_errno(ret);
4214		goto out;
4215	}
4216
4217search_el:
4218	/*
4219	 * Next, we need to find space in the unindexed tree. This call
4220	 * searches using the free space linked list. If the unindexed tree
4221	 * lacks sufficient space, we'll expand it below. The expansion code
4222	 * is smart enough to add any new blocks to the free space list.
4223	 */
4224	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4225	if (ret && ret != -ENOSPC) {
4226		mlog_errno(ret);
4227		goto out;
4228	}
4229
4230	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4231	lookup->dl_dx_root_bh = dx_root_bh;
4232	free_dx_root = 0;
4233
4234	if (ret == -ENOSPC) {
4235		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4236
4237		if (ret) {
4238			mlog_errno(ret);
4239			goto out;
4240		}
4241
4242		/*
4243		 * We make the assumption here that new leaf blocks are added
4244		 * to the front of our free list.
4245		 */
4246		lookup->dl_prev_leaf_bh = NULL;
4247		lookup->dl_leaf_bh = leaf_bh;
4248	}
4249
4250out:
4251	if (free_dx_root)
4252		brelse(dx_root_bh);
4253	return ret;
4254}
4255
4256/*
4257 * Get a directory ready for insert. Any directory allocation required
4258 * happens here. Success returns zero, and enough context in the dir
 4259 * lookup result that ocfs2_add_entry() will be able to complete the task
4260 * with minimal performance impact.
4261 */
4262int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4263				 struct inode *dir,
4264				 struct buffer_head *parent_fe_bh,
4265				 const char *name,
4266				 int namelen,
4267				 struct ocfs2_dir_lookup_result *lookup)
4268{
4269	int ret;
4270	unsigned int blocks_wanted = 1;
4271	struct buffer_head *bh = NULL;
4272
4273	trace_ocfs2_prepare_dir_for_insert(
4274		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4275
4276	if (!namelen) {
4277		ret = -EINVAL;
4278		mlog_errno(ret);
4279		goto out;
4280	}
4281
4282	/*
4283	 * Do this up front to reduce confusion.
4284	 *
4285	 * The directory might start inline, then be turned into an
4286	 * indexed one, in which case we'd need to hash deep inside
4287	 * ocfs2_find_dir_space_id(). Since
4288	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4289	 * done, there seems no point in spreading out the calls. We
4290	 * can optimize away the case where the file system doesn't
4291	 * support indexing.
4292	 */
4293	if (ocfs2_supports_indexed_dirs(osb))
4294		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4295
4296	if (ocfs2_dir_indexed(dir)) {
4297		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4298						      name, namelen, lookup);
4299		if (ret)
4300			mlog_errno(ret);
4301		goto out;
4302	}
4303
4304	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4305		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4306					      namelen, &bh, &blocks_wanted);
4307	} else
4308		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4309
4310	if (ret && ret != -ENOSPC) {
4311		mlog_errno(ret);
4312		goto out;
4313	}
4314
4315	if (ret == -ENOSPC) {
4316		/*
4317		 * We have to expand the directory to add this name.
4318		 */
4319		BUG_ON(bh);
4320
4321		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4322				       lookup, &bh);
4323		if (ret) {
4324			if (ret != -ENOSPC)
4325				mlog_errno(ret);
4326			goto out;
4327		}
4328
4329		BUG_ON(!bh);
4330	}
4331
4332	lookup->dl_leaf_bh = bh;
4333	bh = NULL;
4334out:
4335	brelse(bh);
4336	return ret;
4337}
4338
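/*
 * Detach and free the directory index root block: clear
 * OCFS2_INDEXED_DIR_FL and i_dx_root in the dinode, then return the
 * root block to the extent allocator slot it was suballocated from.
 */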
4339static int ocfs2_dx_dir_remove_index(struct inode *dir,
4340				     struct buffer_head *di_bh,
4341				     struct buffer_head *dx_root_bh)
4342{
4343	int ret;
4344	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4345	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4346	struct ocfs2_dx_root_block *dx_root;
4347	struct inode *dx_alloc_inode = NULL;
4348	struct buffer_head *dx_alloc_bh = NULL;
4349	handle_t *handle;
4350	u64 blk;
4351	u16 bit;
4352	u64 bg_blkno;
4353
4354	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4355
4356	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4357					EXTENT_ALLOC_SYSTEM_INODE,
4358					le16_to_cpu(dx_root->dr_suballoc_slot));
4359	if (!dx_alloc_inode) {
4360		ret = -ENOMEM;
4361		mlog_errno(ret);
4362		goto out;
4363	}
4364	inode_lock(dx_alloc_inode);
4365
4366	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4367	if (ret) {
4368		mlog_errno(ret);
4369		goto out_mutex;
4370	}
4371
4372	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4373	if (IS_ERR(handle)) {
4374		ret = PTR_ERR(handle);
4375		mlog_errno(ret);
4376		goto out_unlock;
4377	}
4378
4379	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4380				      OCFS2_JOURNAL_ACCESS_WRITE);
4381	if (ret) {
4382		mlog_errno(ret);
4383		goto out_commit;
4384	}
4385
4386	spin_lock(&OCFS2_I(dir)->ip_lock);
4387	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4388	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4389	spin_unlock(&OCFS2_I(dir)->ip_lock);
4390	di->i_dx_root = cpu_to_le64(0ULL);
4391	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4392
4393	ocfs2_journal_dirty(handle, di_bh);
4394
4395	blk = le64_to_cpu(dx_root->dr_blkno);
4396	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4397	if (dx_root->dr_suballoc_loc)
4398		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4399	else
4400		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4401	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4402				       bit, bg_blkno, 1);
4403	if (ret)
4404		mlog_errno(ret);
4405
4406out_commit:
4407	ocfs2_commit_trans(osb, handle);
4408
4409out_unlock:
4410	ocfs2_inode_unlock(dx_alloc_inode, 1);
4411
4412out_mutex:
4413	inode_unlock(dx_alloc_inode);
4414	brelse(dx_alloc_bh);
4415out:
4416	iput(dx_alloc_inode);
4417	return ret;
4418}
4419
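/*
 * Free the entire directory index. Walk the dx tree from the highest
 * hash value downwards, removing one extent record's worth of
 * clusters per iteration, then drop the index root block itself.
 */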
4420int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4421{
4422	int ret;
4423	unsigned int uninitialized_var(clen);
4424	u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos);
4425	u64 uninitialized_var(blkno);
4426	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4427	struct buffer_head *dx_root_bh = NULL;
4428	struct ocfs2_dx_root_block *dx_root;
4429	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4430	struct ocfs2_cached_dealloc_ctxt dealloc;
4431	struct ocfs2_extent_tree et;
4432
4433	ocfs2_init_dealloc_ctxt(&dealloc);
4434
4435	if (!ocfs2_dir_indexed(dir))
4436		return 0;
4437
4438	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4439	if (ret) {
4440		mlog_errno(ret);
4441		goto out;
4442	}
4443	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4444
4445	if (ocfs2_dx_root_inline(dx_root))
4446		goto remove_index;
4447
4448	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4449
4450	/* XXX: What if dr_clusters is too large? */
4451	while (le32_to_cpu(dx_root->dr_clusters)) {
4452		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4453					      major_hash, &cpos, &blkno, &clen);
4454		if (ret) {
4455			mlog_errno(ret);
4456			goto out;
4457		}
4458
4459		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4460
4461		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4462					       &dealloc, 0, false);
4463		if (ret) {
4464			mlog_errno(ret);
4465			goto out;
4466		}
4467
4468		if (cpos == 0)
4469			break;
4470
4471		major_hash = cpos - 1;
4472	}
4473
4474remove_index:
4475	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4476	if (ret) {
4477		mlog_errno(ret);
4478		goto out;
4479	}
4480
4481	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4482out:
4483	ocfs2_schedule_truncate_log_flush(osb, 1);
4484	ocfs2_run_deallocs(osb, &dealloc);
4485
4486	brelse(dx_root_bh);
4487	return ret;
4488}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * dir.c
   4 *
   5 * Creates, reads, walks and deletes directory-nodes
   6 *
   7 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   8 *
   9 *  Portions of this code from linux/fs/ext3/dir.c
  10 *
  11 *  Copyright (C) 1992, 1993, 1994, 1995
  12 *  Remy Card (card@masi.ibp.fr)
  13 *  Laboratoire MASI - Institut Blaise pascal
  14 *  Universite Pierre et Marie Curie (Paris VI)
  15 *
  16 *   from
  17 *
  18 *   linux/fs/minix/dir.c
  19 *
  20 *   Copyright (C) 1991, 1992 Linus Torvalds
  21 */
  22
  23#include <linux/fs.h>
  24#include <linux/types.h>
  25#include <linux/slab.h>
  26#include <linux/highmem.h>
  27#include <linux/quotaops.h>
  28#include <linux/sort.h>
  29#include <linux/iversion.h>
  30
  31#include <cluster/masklog.h>
  32
  33#include "ocfs2.h"
  34
  35#include "alloc.h"
  36#include "blockcheck.h"
  37#include "dir.h"
  38#include "dlmglue.h"
  39#include "extent_map.h"
  40#include "file.h"
  41#include "inode.h"
  42#include "journal.h"
  43#include "namei.h"
  44#include "suballoc.h"
  45#include "super.h"
  46#include "sysfile.h"
  47#include "uptodate.h"
  48#include "ocfs2_trace.h"
  49
  50#include "buffer_head_io.h"
  51
  52#define NAMEI_RA_CHUNKS  2
  53#define NAMEI_RA_BLOCKS  4
  54#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  55
  56static int ocfs2_do_extend_dir(struct super_block *sb,
  57			       handle_t *handle,
  58			       struct inode *dir,
  59			       struct buffer_head *parent_fe_bh,
  60			       struct ocfs2_alloc_context *data_ac,
  61			       struct ocfs2_alloc_context *meta_ac,
  62			       struct buffer_head **new_bh);
  63static int ocfs2_dir_indexed(struct inode *inode);
  64
  65/*
  66 * These are distinct checks because future versions of the file system will
  67 * want to have a trailing dirent structure independent of indexing.
  68 */
  69static int ocfs2_supports_dir_trailer(struct inode *dir)
  70{
  71	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  72
  73	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  74		return 0;
  75
  76	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  77}
  78
  79/*
  80 * "new' here refers to the point at which we're creating a new
  81 * directory via "mkdir()", but also when we're expanding an inline
  82 * directory. In either case, we don't yet have the indexing bit set
  83 * on the directory, so the standard checks will fail when metaecc
  84 * is turned off. Only directory-initialization type functions should
  85 * use this then. Everything else wants ocfs2_supports_dir_trailer()
  86 */
  87static int ocfs2_new_dir_wants_trailer(struct inode *dir)
  88{
  89	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  90
  91	return ocfs2_meta_ecc(osb) ||
  92		ocfs2_supports_indexed_dirs(osb);
  93}
  94
  95static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
  96{
  97	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
  98}
  99
 100#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 101
 102/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 103 * them more consistent? */
 104struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 105							    void *data)
 106{
 107	char *p = data;
 108
 109	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 110	return (struct ocfs2_dir_block_trailer *)p;
 111}
 112
 113/*
 114 * XXX: This is executed once on every dirent. We should consider optimizing
 115 * it.
 116 */
 117static int ocfs2_skip_dir_trailer(struct inode *dir,
 118				  struct ocfs2_dir_entry *de,
 119				  unsigned long offset,
 120				  unsigned long blklen)
 121{
 122	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 123
 124	if (!ocfs2_supports_dir_trailer(dir))
 125		return 0;
 126
 127	if (offset != toff)
 128		return 0;
 129
 130	return 1;
 131}
 132
 133static void ocfs2_init_dir_trailer(struct inode *inode,
 134				   struct buffer_head *bh, u16 rec_len)
 135{
 136	struct ocfs2_dir_block_trailer *trailer;
 137
 138	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 139	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 140	trailer->db_compat_rec_len =
 141			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 142	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 143	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 144	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 145}
 146/*
 147 * Link an unindexed block with a dir trailer structure into the index free
 148 * list. This function will modify dirdata_bh, but assumes you've already
 149 * passed it to the journal.
 150 */
 151static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 152				     struct buffer_head *dx_root_bh,
 153				     struct buffer_head *dirdata_bh)
 154{
 155	int ret;
 156	struct ocfs2_dx_root_block *dx_root;
 157	struct ocfs2_dir_block_trailer *trailer;
 158
 159	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 160				      OCFS2_JOURNAL_ACCESS_WRITE);
 161	if (ret) {
 162		mlog_errno(ret);
 163		goto out;
 164	}
 165	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 166	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 167
 168	trailer->db_free_next = dx_root->dr_free_blk;
 169	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 170
 171	ocfs2_journal_dirty(handle, dx_root_bh);
 172
 173out:
 174	return ret;
 175}
 176
 177static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 178{
 179	return res->dl_prev_leaf_bh == NULL;
 180}
 181
 182void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 183{
 184	brelse(res->dl_dx_root_bh);
 185	brelse(res->dl_leaf_bh);
 186	brelse(res->dl_dx_leaf_bh);
 187	brelse(res->dl_prev_leaf_bh);
 188}
 189
 190static int ocfs2_dir_indexed(struct inode *inode)
 191{
 192	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 193		return 1;
 194	return 0;
 195}
 196
 197static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 198{
 199	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 200}
 201
 202/*
 203 * Hashing code adapted from ext3
 204 */
 205#define DELTA 0x9E3779B9
 206
 207static void TEA_transform(__u32 buf[4], __u32 const in[])
 208{
 209	__u32	sum = 0;
 210	__u32	b0 = buf[0], b1 = buf[1];
 211	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 212	int	n = 16;
 213
 214	do {
 215		sum += DELTA;
 216		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 217		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 218	} while (--n);
 219
 220	buf[0] += b0;
 221	buf[1] += b1;
 222}
 223
 224static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 225{
 226	__u32	pad, val;
 227	int	i;
 228
 229	pad = (__u32)len | ((__u32)len << 8);
 230	pad |= pad << 16;
 231
 232	val = pad;
 233	if (len > num*4)
 234		len = num * 4;
 235	for (i = 0; i < len; i++) {
 236		if ((i % 4) == 0)
 237			val = pad;
 238		val = msg[i] + (val << 8);
 239		if ((i % 4) == 3) {
 240			*buf++ = val;
 241			val = pad;
 242			num--;
 243		}
 244	}
 245	if (--num >= 0)
 246		*buf++ = val;
 247	while (--num >= 0)
 248		*buf++ = pad;
 249}
 250
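/*
 * Compute the major/minor hash pair for a name. "." and ".." hash to
 * zero; everything else is fed through the TEA transform, seeded with
 * the per-filesystem osb_dx_seed, sixteen input bytes at a time.
 */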
 251static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 252				   struct ocfs2_dx_hinfo *hinfo)
 253{
 254	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 255	const char	*p;
 256	__u32		in[8], buf[4];
 257
 258	/*
 259	 * XXX: Is this really necessary, if the index is never looked
 260	 * at by readdir? Is a hash value of '0' a bad idea?
 261	 */
 262	if ((len == 1 && !strncmp(".", name, 1)) ||
 263	    (len == 2 && !strncmp("..", name, 2))) {
 264		buf[0] = buf[1] = 0;
 265		goto out;
 266	}
 267
 268#ifdef OCFS2_DEBUG_DX_DIRS
 269	/*
 270	 * This makes it very easy to debug indexing problems. We
 271	 * should never allow this to be selected without hand editing
 272	 * this file though.
 273	 */
 274	buf[0] = buf[1] = len;
 275	goto out;
 276#endif
 277
 278	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 279
 280	p = name;
 281	while (len > 0) {
 282		str2hashbuf(p, len, in, 4);
 283		TEA_transform(buf, in);
 284		len -= 16;
 285		p += 16;
 286	}
 287
 288out:
 289	hinfo->major_hash = buf[0];
 290	hinfo->minor_hash = buf[1];
 291}
 292
 293/*
 294 * bh passed here can be an inode block or a dir data block, depending
 295 * on the inode inline data flag.
 296 */
 297static int ocfs2_check_dir_entry(struct inode * dir,
 298				 struct ocfs2_dir_entry * de,
 299				 struct buffer_head * bh,
 300				 unsigned long offset)
 301{
 302	const char *error_msg = NULL;
 303	const int rlen = le16_to_cpu(de->rec_len);
 304
 305	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 306		error_msg = "rec_len is smaller than minimal";
 307	else if (unlikely(rlen % 4 != 0))
 308		error_msg = "rec_len % 4 != 0";
 309	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 310		error_msg = "rec_len is too small for name_len";
 311	else if (unlikely(
 312		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
 313		error_msg = "directory entry across blocks";
 314
 315	if (unlikely(error_msg != NULL))
 316		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 317		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 318		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 319		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 320		     de->name_len);
 321
 322	return error_msg == NULL ? 1 : 0;
 323}
 324
 325static inline int ocfs2_match(int len,
 326			      const char * const name,
 327			      struct ocfs2_dir_entry *de)
 328{
 329	if (len != de->name_len)
 330		return 0;
 331	if (!de->inode)
 332		return 0;
 333	return !memcmp(name, de->name, len);
 334}
 335
 336/*
 337 * Returns 0 if not found, -1 on failure, and 1 on success
 338 */
 339static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 340					struct inode *dir,
 341					const char *name, int namelen,
 342					unsigned long offset,
 343					char *first_de,
 344					unsigned int bytes,
 345					struct ocfs2_dir_entry **res_dir)
 346{
 347	struct ocfs2_dir_entry *de;
 348	char *dlimit, *de_buf;
 349	int de_len;
 350	int ret = 0;
 351
 352	de_buf = first_de;
 353	dlimit = de_buf + bytes;
 354
 355	while (de_buf < dlimit) {
 356		/* this code is executed quadratically often */
 357		/* do minimal checking `by hand' */
 358
 359		de = (struct ocfs2_dir_entry *) de_buf;
 360
 361		if (de_buf + namelen <= dlimit &&
 362		    ocfs2_match(namelen, name, de)) {
 363			/* found a match - just to be sure, do a full check */
 364			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 365				ret = -1;
 366				goto bail;
 367			}
 368			*res_dir = de;
 369			ret = 1;
 370			goto bail;
 371		}
 372
 373		/* prevent looping on a bad block */
 374		de_len = le16_to_cpu(de->rec_len);
 375		if (de_len <= 0) {
 376			ret = -1;
 377			goto bail;
 378		}
 379
 380		de_buf += de_len;
 381		offset += de_len;
 382	}
 383
 384bail:
 385	trace_ocfs2_search_dirblock(ret);
 386	return ret;
 387}
 388
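/*
 * Search an inline-data directory: the dirents live directly in the
 * dinode's id2.i_data, so scan that region and return the inode
 * buffer_head on a match, NULL otherwise.
 */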
 389static struct buffer_head *ocfs2_find_entry_id(const char *name,
 390					       int namelen,
 391					       struct inode *dir,
 392					       struct ocfs2_dir_entry **res_dir)
 393{
 394	int ret, found;
 395	struct buffer_head *di_bh = NULL;
 396	struct ocfs2_dinode *di;
 397	struct ocfs2_inline_data *data;
 398
 399	ret = ocfs2_read_inode_block(dir, &di_bh);
 400	if (ret) {
 401		mlog_errno(ret);
 402		goto out;
 403	}
 404
 405	di = (struct ocfs2_dinode *)di_bh->b_data;
 406	data = &di->id2.i_data;
 407
 408	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 409				      data->id_data, i_size_read(dir), res_dir);
 410	if (found == 1)
 411		return di_bh;
 412
 413	brelse(di_bh);
 414out:
 415	return NULL;
 416}
 417
 418static int ocfs2_validate_dir_block(struct super_block *sb,
 419				    struct buffer_head *bh)
 420{
 421	int rc;
 422	struct ocfs2_dir_block_trailer *trailer =
 423		ocfs2_trailer_from_bh(bh, sb);
 424
 425
 426	/*
  427	 * We don't validate dirents here; that's handled
 428	 * in-place when the code walks them.
 429	 */
 430	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 431
 432	BUG_ON(!buffer_uptodate(bh));
 433
 434	/*
 435	 * If the ecc fails, we return the error but otherwise
 436	 * leave the filesystem running.  We know any error is
 437	 * local to this block.
 438	 *
 439	 * Note that we are safe to call this even if the directory
 440	 * doesn't have a trailer.  Filesystems without metaecc will do
  441	 * nothing in that call, and filesystems with metaecc will have a trailer.
 442	 */
 443	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 444	if (rc)
 445		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
 446		     (unsigned long long)bh->b_blocknr);
 447
 448	return rc;
 449}
 450
 451/*
 452 * Validate a directory trailer.
 453 *
 454 * We check the trailer here rather than in ocfs2_validate_dir_block()
 455 * because that function doesn't have the inode to test.
 456 */
 457static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 458{
 459	int rc = 0;
 460	struct ocfs2_dir_block_trailer *trailer;
 461
 462	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 463	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 464		rc = ocfs2_error(dir->i_sb,
 465				 "Invalid dirblock #%llu: signature = %.*s\n",
 466				 (unsigned long long)bh->b_blocknr, 7,
 467				 trailer->db_signature);
 468		goto out;
 469	}
 470	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 471		rc = ocfs2_error(dir->i_sb,
 472				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 473				 (unsigned long long)bh->b_blocknr,
 474				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 475		goto out;
 476	}
 477	if (le64_to_cpu(trailer->db_parent_dinode) !=
 478	    OCFS2_I(dir)->ip_blkno) {
 479		rc = ocfs2_error(dir->i_sb,
 480				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 481				 (unsigned long long)bh->b_blocknr,
 482				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
 483				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 484		goto out;
 485	}
 486out:
 487	return rc;
 488}
 489
 490/*
 491 * This function forces all errors to -EIO for consistency with its
 492 * predecessor, ocfs2_bread().  We haven't audited what returning the
 493 * real error codes would do to callers.  We log the real codes with
 494 * mlog_errno() before we squash them.
 495 */
 496static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 497				struct buffer_head **bh, int flags)
 498{
 499	int rc = 0;
 500	struct buffer_head *tmp = *bh;
 501
 502	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 503				    ocfs2_validate_dir_block);
 504	if (rc) {
 505		mlog_errno(rc);
 506		goto out;
 507	}
 508
 509	if (!(flags & OCFS2_BH_READAHEAD) &&
 510	    ocfs2_supports_dir_trailer(inode)) {
 511		rc = ocfs2_check_dir_trailer(inode, tmp);
 512		if (rc) {
 513			if (!*bh)
 514				brelse(tmp);
 515			mlog_errno(rc);
 516			goto out;
 517		}
 518	}
 519
 520	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 521	if (!*bh)
 522		*bh = tmp;
 523
 524out:
 525	return rc ? -EIO : 0;
 526}
 527
 528/*
 529 * Read the block at 'phys' which belongs to this directory
 530 * inode. This function does no virtual->physical block translation -
 531 * what's passed in is assumed to be a valid directory block.
 532 */
 533static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 534				       struct buffer_head **bh)
 535{
 536	int ret;
 537	struct buffer_head *tmp = *bh;
 538
 539	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 540			       ocfs2_validate_dir_block);
 541	if (ret) {
 542		mlog_errno(ret);
 543		goto out;
 544	}
 545
 546	if (ocfs2_supports_dir_trailer(dir)) {
 547		ret = ocfs2_check_dir_trailer(dir, tmp);
 548		if (ret) {
 549			if (!*bh)
 550				brelse(tmp);
 551			mlog_errno(ret);
 552			goto out;
 553		}
 554	}
 555
 556	if (!ret && !*bh)
 557		*bh = tmp;
 558out:
 559	return ret;
 560}
 561
 562static int ocfs2_validate_dx_root(struct super_block *sb,
 563				  struct buffer_head *bh)
 564{
 565	int ret;
 566	struct ocfs2_dx_root_block *dx_root;
 567
 568	BUG_ON(!buffer_uptodate(bh));
 569
 570	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 571
 572	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 573	if (ret) {
 574		mlog(ML_ERROR,
 575		     "Checksum failed for dir index root block %llu\n",
 576		     (unsigned long long)bh->b_blocknr);
 577		return ret;
 578	}
 579
 580	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 581		ret = ocfs2_error(sb,
 582				  "Dir Index Root # %llu has bad signature %.*s\n",
 583				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 584				  7, dx_root->dr_signature);
 585	}
 586
 587	return ret;
 588}
 589
 590static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 591			      struct buffer_head **dx_root_bh)
 592{
 593	int ret;
 594	u64 blkno = le64_to_cpu(di->i_dx_root);
 595	struct buffer_head *tmp = *dx_root_bh;
 596
 597	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 598			       ocfs2_validate_dx_root);
 599
 600	/* If ocfs2_read_block() got us a new bh, pass it up. */
 601	if (!ret && !*dx_root_bh)
 602		*dx_root_bh = tmp;
 603
 604	return ret;
 605}
 606
 607static int ocfs2_validate_dx_leaf(struct super_block *sb,
 608				  struct buffer_head *bh)
 609{
 610	int ret;
 611	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 612
 613	BUG_ON(!buffer_uptodate(bh));
 614
 615	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 616	if (ret) {
 617		mlog(ML_ERROR,
 618		     "Checksum failed for dir index leaf block %llu\n",
 619		     (unsigned long long)bh->b_blocknr);
 620		return ret;
 621	}
 622
 623	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 624		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 625				  7, dx_leaf->dl_signature);
 626	}
 627
 628	return ret;
 629}
 630
 631static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 632			      struct buffer_head **dx_leaf_bh)
 633{
 634	int ret;
 635	struct buffer_head *tmp = *dx_leaf_bh;
 636
 637	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 638			       ocfs2_validate_dx_leaf);
 639
 640	/* If ocfs2_read_block() got us a new bh, pass it up. */
 641	if (!ret && !*dx_leaf_bh)
 642		*dx_leaf_bh = tmp;
 643
 644	return ret;
 645}
 646
 647/*
 648 * Read a series of dx_leaf blocks. This expects all buffer_head
 649 * pointers to be NULL on function entry.
 650 */
 651static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 652				struct buffer_head **dx_leaf_bhs)
 653{
 654	int ret;
 655
 656	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 657				ocfs2_validate_dx_leaf);
 658	if (ret)
 659		mlog_errno(ret);
 660
 661	return ret;
 662}
 663
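/*
 * Linear search of an extent-based directory, one block at a time,
 * with up to NAMEI_RA_SIZE blocks of readahead. ip_dir_start_lookup
 * records the block of the last hit so later lookups start near it.
 */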
 664static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 665					       struct inode *dir,
 666					       struct ocfs2_dir_entry **res_dir)
 667{
 668	struct super_block *sb;
 669	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 670	struct buffer_head *bh, *ret = NULL;
 671	unsigned long start, block, b;
 672	int ra_max = 0;		/* Number of bh's in the readahead
 673				   buffer, bh_use[] */
 674	int ra_ptr = 0;		/* Current index into readahead
 675				   buffer */
 676	int num = 0;
 677	int nblocks, i;
 678
 679	sb = dir->i_sb;
 680
 681	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 682	start = OCFS2_I(dir)->ip_dir_start_lookup;
 683	if (start >= nblocks)
 684		start = 0;
 685	block = start;
 686
 687restart:
 688	do {
 689		/*
 690		 * We deal with the read-ahead logic here.
 691		 */
 692		if (ra_ptr >= ra_max) {
 693			/* Refill the readahead buffer */
 694			ra_ptr = 0;
 695			b = block;
 696			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 697				/*
 698				 * Terminate if we reach the end of the
 699				 * directory and must wrap, or if our
 700				 * search has finished at this block.
 701				 */
 702				if (b >= nblocks || (num && block == start)) {
 703					bh_use[ra_max] = NULL;
 704					break;
 705				}
 706				num++;
 707
 708				bh = NULL;
 709				ocfs2_read_dir_block(dir, b++, &bh,
 710							   OCFS2_BH_READAHEAD);
 711				bh_use[ra_max] = bh;
 712			}
 713		}
 714		if ((bh = bh_use[ra_ptr++]) == NULL)
 715			goto next;
 716		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 717			/* read error, skip block & hope for the best.
 718			 * ocfs2_read_dir_block() has released the bh. */
 719			mlog(ML_ERROR, "reading directory %llu, "
 720				    "offset %lu\n",
 721				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 722				    block);
 723			goto next;
 724		}
 725		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 726					  block << sb->s_blocksize_bits,
 727					  bh->b_data, sb->s_blocksize,
 728					  res_dir);
 729		if (i == 1) {
 730			OCFS2_I(dir)->ip_dir_start_lookup = block;
 731			ret = bh;
 732			goto cleanup_and_exit;
 733		} else {
 734			brelse(bh);
 735			if (i < 0)
 736				goto cleanup_and_exit;
 737		}
 738	next:
 739		if (++block >= nblocks)
 740			block = 0;
 741	} while (block != start);
 742
 743	/*
 744	 * If the directory has grown while we were searching, then
 745	 * search the last part of the directory before giving up.
 746	 */
 747	block = nblocks;
 748	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 749	if (block < nblocks) {
 750		start = 0;
 751		goto restart;
 752	}
 753
 754cleanup_and_exit:
 755	/* Clean up the read-ahead blocks */
 756	for (; ra_ptr < ra_max; ra_ptr++)
 757		brelse(bh_use[ra_ptr]);
 758
 759	trace_ocfs2_find_entry_el(ret);
 760	return ret;
 761}
 762
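/*
 * Find the extent record in the dx tree covering 'major_hash' and
 * return its cpos, physical block and cluster count. Records are
 * scanned from the end of the list, so we match the record with the
 * largest e_cpos that is still <= major_hash.
 */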
 763static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 764				   struct ocfs2_extent_list *el,
 765				   u32 major_hash,
 766				   u32 *ret_cpos,
 767				   u64 *ret_phys_blkno,
 768				   unsigned int *ret_clen)
 769{
 770	int ret = 0, i, found;
 771	struct buffer_head *eb_bh = NULL;
 772	struct ocfs2_extent_block *eb;
 773	struct ocfs2_extent_rec *rec = NULL;
 774
 775	if (el->l_tree_depth) {
 776		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 777				      &eb_bh);
 778		if (ret) {
 779			mlog_errno(ret);
 780			goto out;
 781		}
 782
 783		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 784		el = &eb->h_list;
 785
 786		if (el->l_tree_depth) {
 787			ret = ocfs2_error(inode->i_sb,
 788					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 789					  inode->i_ino,
 790					  (unsigned long long)eb_bh->b_blocknr);
 791			goto out;
 792		}
 793	}
 794
 795	found = 0;
 796	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 797		rec = &el->l_recs[i];
 798
 799		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 800			found = 1;
 801			break;
 802		}
 803	}
 804
 805	if (!found) {
 806		ret = ocfs2_error(inode->i_sb,
 807				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 808				  inode->i_ino,
 809				  le32_to_cpu(rec->e_cpos),
 810				  ocfs2_rec_clusters(el, rec));
 811		goto out;
 812	}
 813
 814	if (ret_phys_blkno)
 815		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 816	if (ret_cpos)
 817		*ret_cpos = le32_to_cpu(rec->e_cpos);
 818	if (ret_clen)
 819		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 820
 821out:
 822	brelse(eb_bh);
 823	return ret;
 824}
 825
 826/*
 827 * Returns the block index, from the start of the cluster which this
  828 * hash belongs to.
 829 */
 830static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 831						   u32 minor_hash)
 832{
 833	return minor_hash & osb->osb_dx_mask;
 834}
 835
 836static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 837					  struct ocfs2_dx_hinfo *hinfo)
 838{
 839	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 840}
 841
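/*
 * Map a name hash onto the dx leaf block that should hold its entry:
 * find the extent record covering major_hash, clamp to that record's
 * range, then pick the block within the cluster from the low bits of
 * minor_hash. For example (a sketch, assuming osb_dx_mask is
 * blocks-per-cluster minus one): with 4k blocks and 32k clusters the
 * mask is 0x7, so minor_hash 0x1234 always selects block 4 of
 * whichever cluster covers its major_hash.
 */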
 842static int ocfs2_dx_dir_lookup(struct inode *inode,
 843			       struct ocfs2_extent_list *el,
 844			       struct ocfs2_dx_hinfo *hinfo,
 845			       u32 *ret_cpos,
 846			       u64 *ret_phys_blkno)
 847{
 848	int ret = 0;
 849	unsigned int cend, clen;
 850	u32 cpos;
 851	u64 blkno;
 852	u32 name_hash = hinfo->major_hash;
 853
 854	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 855				      &clen);
 856	if (ret) {
 857		mlog_errno(ret);
 858		goto out;
 859	}
 860
 861	cend = cpos + clen;
 862	if (name_hash >= cend) {
 863		/* We want the last cluster */
 864		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 865		cpos += clen - 1;
 866	} else {
 867		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 868						  name_hash - cpos);
 869		cpos = name_hash;
 870	}
 871
 872	/*
 873	 * We now have the cluster which should hold our entry. To
 874	 * find the exact block from the start of the cluster to
 875	 * search, we take the lower bits of the hash.
 876	 */
 877	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 878
 879	if (ret_phys_blkno)
 880		*ret_phys_blkno = blkno;
 881	if (ret_cpos)
 882		*ret_cpos = cpos;
 883
 884out:
 885
 886	return ret;
 887}
 888
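/*
 * The indexed lookup proper: hash the name, locate the dx entry list
 * (inline in the root, or in a dx leaf found via ocfs2_dx_dir_lookup),
 * then for every entry whose major and minor hashes both match, read
 * the unindexed block it points at and do the full name compare there.
 */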
 889static int ocfs2_dx_dir_search(const char *name, int namelen,
 890			       struct inode *dir,
 891			       struct ocfs2_dx_root_block *dx_root,
 892			       struct ocfs2_dir_lookup_result *res)
 893{
 894	int ret, i, found;
 895	u64 phys;
 896	struct buffer_head *dx_leaf_bh = NULL;
 897	struct ocfs2_dx_leaf *dx_leaf;
 898	struct ocfs2_dx_entry *dx_entry = NULL;
 899	struct buffer_head *dir_ent_bh = NULL;
 900	struct ocfs2_dir_entry *dir_ent = NULL;
 901	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 902	struct ocfs2_extent_list *dr_el;
 903	struct ocfs2_dx_entry_list *entry_list;
 904
 905	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 906
 907	if (ocfs2_dx_root_inline(dx_root)) {
 908		entry_list = &dx_root->dr_entries;
 909		goto search;
 910	}
 911
 912	dr_el = &dx_root->dr_list;
 913
 914	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 915	if (ret) {
 916		mlog_errno(ret);
 917		goto out;
 918	}
 919
 920	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 921				  namelen, name, hinfo->major_hash,
 922				  hinfo->minor_hash, (unsigned long long)phys);
 923
 924	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 925	if (ret) {
 926		mlog_errno(ret);
 927		goto out;
 928	}
 929
 930	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 931
 932	trace_ocfs2_dx_dir_search_leaf_info(
 933			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 934			le16_to_cpu(dx_leaf->dl_list.de_count));
 935
 936	entry_list = &dx_leaf->dl_list;
 937
 938search:
 939	/*
 940	 * Empty leaf is legal, so no need to check for that.
 941	 */
 942	found = 0;
 943	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 944		dx_entry = &entry_list->de_entries[i];
 945
 946		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 947		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 948			continue;
 949
 950		/*
 951		 * Search unindexed leaf block now. We're not
 952		 * guaranteed to find anything.
 953		 */
 954		ret = ocfs2_read_dir_block_direct(dir,
 955					  le64_to_cpu(dx_entry->dx_dirent_blk),
 956					  &dir_ent_bh);
 957		if (ret) {
 958			mlog_errno(ret);
 959			goto out;
 960		}
 961
 962		/*
 963		 * XXX: We should check the unindexed block here,
 964		 * before using it.
 965		 */
 966
 967		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 968					      0, dir_ent_bh->b_data,
 969					      dir->i_sb->s_blocksize, &dir_ent);
 970		if (found == 1)
 971			break;
 972
 973		if (found == -1) {
 974			/* This means we found a bad directory entry. */
 975			ret = -EIO;
 976			mlog_errno(ret);
 977			goto out;
 978		}
 979
 980		brelse(dir_ent_bh);
 981		dir_ent_bh = NULL;
 982	}
 983
 984	if (found <= 0) {
 985		ret = -ENOENT;
 986		goto out;
 987	}
 988
 989	res->dl_leaf_bh = dir_ent_bh;
 990	res->dl_entry = dir_ent;
 991	res->dl_dx_leaf_bh = dx_leaf_bh;
 992	res->dl_dx_entry = dx_entry;
 993
 994	ret = 0;
 995out:
 996	if (ret) {
 997		brelse(dx_leaf_bh);
 998		brelse(dir_ent_bh);
 999	}
1000	return ret;
1001}
1002
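/*
 * Indexed-directory front end for ocfs2_find_entry(): read the dinode
 * and dx root, run the dx search, and keep the dx root buffer_head in
 * the lookup result for later use by the manipulation functions.
 */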
1003static int ocfs2_find_entry_dx(const char *name, int namelen,
1004			       struct inode *dir,
1005			       struct ocfs2_dir_lookup_result *lookup)
1006{
1007	int ret;
1008	struct buffer_head *di_bh = NULL;
1009	struct ocfs2_dinode *di;
1010	struct buffer_head *dx_root_bh = NULL;
1011	struct ocfs2_dx_root_block *dx_root;
1012
1013	ret = ocfs2_read_inode_block(dir, &di_bh);
1014	if (ret) {
1015		mlog_errno(ret);
1016		goto out;
1017	}
1018
1019	di = (struct ocfs2_dinode *)di_bh->b_data;
1020
1021	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1022	if (ret) {
1023		mlog_errno(ret);
1024		goto out;
1025	}
1026	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1027
1028	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1029	if (ret) {
1030		if (ret != -ENOENT)
1031			mlog_errno(ret);
1032		goto out;
1033	}
1034
1035	lookup->dl_dx_root_bh = dx_root_bh;
1036	dx_root_bh = NULL;
1037out:
1038	brelse(di_bh);
1039	brelse(dx_root_bh);
1040	return ret;
1041}
1042
1043/*
1044 * Try to find an entry of the provided name within 'dir'.
1045 *
1046 * If nothing was found, -ENOENT is returned. Otherwise, zero is
1047 * returned and the struct 'res' will contain information useful to
1048 * other directory manipulation functions.
1049 *
1050 * Caller can NOT assume anything about the contents of the
 1051 * buffer_heads - they are passed back only so that they can be passed
1052 * into any one of the manipulation functions (add entry, delete
1053 * entry, etc). As an example, bh in the extent directory case is a
 1054 * data block; in the inline-data case it actually points to an inode;
1055 * in the indexed directory case, multiple buffers are involved.
1056 */
1057int ocfs2_find_entry(const char *name, int namelen,
1058		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1059{
1060	struct buffer_head *bh;
1061	struct ocfs2_dir_entry *res_dir = NULL;
1062
1063	if (ocfs2_dir_indexed(dir))
1064		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1065
1066	/*
1067	 * The unindexed dir code only uses part of the lookup
1068	 * structure, so there's no reason to push it down further
1069	 * than this.
1070	 */
1071	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1072		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1073	else
1074		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
1075
1076	if (bh == NULL)
1077		return -ENOENT;
1078
1079	lookup->dl_leaf_bh = bh;
1080	lookup->dl_entry = res_dir;
1081	return 0;
1082}
1083
1084/*
1085 * Update inode number and type of a previously found directory entry.
1086 */
1087int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1088		       struct ocfs2_dir_lookup_result *res,
1089		       struct inode *new_entry_inode)
1090{
1091	int ret;
1092	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1093	struct ocfs2_dir_entry *de = res->dl_entry;
1094	struct buffer_head *de_bh = res->dl_leaf_bh;
1095
1096	/*
1097	 * The same code works fine for both inline-data and extent
1098	 * based directories, so no need to split this up.  The only
1099	 * difference is the journal_access function.
1100	 */
1101
1102	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1103		access = ocfs2_journal_access_di;
1104
1105	ret = access(handle, INODE_CACHE(dir), de_bh,
1106		     OCFS2_JOURNAL_ACCESS_WRITE);
1107	if (ret) {
1108		mlog_errno(ret);
1109		goto out;
1110	}
1111
1112	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1113	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1114
1115	ocfs2_journal_dirty(handle, de_bh);
1116
1117out:
1118	return ret;
1119}
1120
1121/*
1122 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1123 * previous entry
1124 */
1125static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1126				struct ocfs2_dir_entry *de_del,
1127				struct buffer_head *bh, char *first_de,
1128				unsigned int bytes)
1129{
1130	struct ocfs2_dir_entry *de, *pde;
1131	int i, status = -ENOENT;
1132	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1133
1134	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1135		access = ocfs2_journal_access_di;
1136
1137	i = 0;
1138	pde = NULL;
1139	de = (struct ocfs2_dir_entry *) first_de;
1140	while (i < bytes) {
1141		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
1142			status = -EIO;
1143			mlog_errno(status);
1144			goto bail;
1145		}
1146		if (de == de_del)  {
1147			status = access(handle, INODE_CACHE(dir), bh,
1148					OCFS2_JOURNAL_ACCESS_WRITE);
1149			if (status < 0) {
1150				status = -EIO;
1151				mlog_errno(status);
1152				goto bail;
1153			}
1154			if (pde)
1155				le16_add_cpu(&pde->rec_len,
1156						le16_to_cpu(de->rec_len));
1157			de->inode = 0;
1158			inode_inc_iversion(dir);
1159			ocfs2_journal_dirty(handle, bh);
1160			goto bail;
1161		}
1162		i += le16_to_cpu(de->rec_len);
1163		pde = de;
1164		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1165	}
1166bail:
1167	return status;
1168}
1169
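/*
 * How many bytes of this dirent's record are reusable: the whole
 * rec_len if the entry is unused (inode == 0), otherwise just the
 * slack after the name.
 */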
1170static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1171{
1172	unsigned int hole;
1173
1174	if (le64_to_cpu(de->inode) == 0)
1175		hole = le16_to_cpu(de->rec_len);
1176	else
1177		hole = le16_to_cpu(de->rec_len) -
1178			OCFS2_DIR_REC_LEN(de->name_len);
1179
1180	return hole;
1181}
1182
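/*
 * Scan a directory block (excluding the trailer) and return the
 * largest reusable hole found, or 0 if nothing is at least
 * OCFS2_DIR_MIN_REC_LEN bytes.
 */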
1183static int ocfs2_find_max_rec_len(struct super_block *sb,
1184				  struct buffer_head *dirblock_bh)
1185{
1186	int size, this_hole, largest_hole = 0;
1187	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1188	struct ocfs2_dir_entry *de;
1189
1190	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1191	size = ocfs2_dir_trailer_blk_off(sb);
1192	limit = start + size;
1193	de_buf = start;
1194	de = (struct ocfs2_dir_entry *)de_buf;
1195	do {
1196		if (de_buf != trailer) {
1197			this_hole = ocfs2_figure_dirent_hole(de);
1198			if (this_hole > largest_hole)
1199				largest_hole = this_hole;
1200		}
1201
1202		de_buf += le16_to_cpu(de->rec_len);
1203		de = (struct ocfs2_dir_entry *)de_buf;
1204	} while (de_buf < limit);
1205
1206	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1207		return largest_hole;
1208	return 0;
1209}
1210
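/*
 * Remove entry 'index' from a dx entry list by sliding the tail of
 * the array down one slot and zeroing the now-unused last entry.
 */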
1211static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1212				       int index)
1213{
1214	int num_used = le16_to_cpu(entry_list->de_num_used);
1215
1216	if (num_used == 1 || index == (num_used - 1))
1217		goto clear;
1218
1219	memmove(&entry_list->de_entries[index],
1220		&entry_list->de_entries[index + 1],
1221		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1222clear:
1223	num_used--;
1224	memset(&entry_list->de_entries[num_used], 0,
1225	       sizeof(struct ocfs2_dx_entry));
1226	entry_list->de_num_used = cpu_to_le16(num_used);
1227}
1228
1229static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1230				 struct ocfs2_dir_lookup_result *lookup)
1231{
1232	int ret, index, max_rec_len, add_to_free_list = 0;
1233	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1234	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1235	struct ocfs2_dx_leaf *dx_leaf;
1236	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1237	struct ocfs2_dir_block_trailer *trailer;
1238	struct ocfs2_dx_root_block *dx_root;
1239	struct ocfs2_dx_entry_list *entry_list;
1240
1241	/*
1242	 * This function gets a bit messy because we might have to
1243	 * modify the root block, regardless of whether the indexed
1244	 * entries are stored inline.
1245	 */
1246
1247	/*
1248	 * *Only* set 'entry_list' here, based on where we're looking
1249	 * for the indexed entries. Later, we might still want to
1250	 * journal both blocks, based on free list state.
1251	 */
1252	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1253	if (ocfs2_dx_root_inline(dx_root)) {
1254		entry_list = &dx_root->dr_entries;
1255	} else {
1256		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1257		entry_list = &dx_leaf->dl_list;
1258	}
1259
1260	/* Neither of these indicates disk corruption - that should have
1261	 * been caught by lookup, before we got here. */
1262	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1263	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1264
1265	index = (char *)dx_entry - (char *)entry_list->de_entries;
1266	index /= sizeof(*dx_entry);
1267
1268	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1269		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1270		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1271		     entry_list, dx_entry);
1272		return -EIO;
1273	}
1274
1275	/*
1276	 * We know that removal of this dirent will leave enough room
1277	 * for a new one, so add this block to the free list if it
1278	 * isn't already there.
1279	 */
1280	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1281	if (trailer->db_free_rec_len == 0)
1282		add_to_free_list = 1;
1283
1284	/*
1285	 * Add the block holding our index into the journal before
1286	 * removing the unindexed entry. If we get an error return
1287	 * from __ocfs2_delete_entry(), then it hasn't removed the
1288	 * entry yet. Likewise, successful return means we *must*
1289	 * remove the indexed entry.
1290	 *
1291	 * We're also careful to journal the root tree block here as
1292	 * the entry count needs to be updated. Also, we might be
1293	 * adding to the start of the free list.
1294	 */
1295	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1296				      OCFS2_JOURNAL_ACCESS_WRITE);
1297	if (ret) {
1298		mlog_errno(ret);
1299		goto out;
1300	}
1301
1302	if (!ocfs2_dx_root_inline(dx_root)) {
1303		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1304					      lookup->dl_dx_leaf_bh,
1305					      OCFS2_JOURNAL_ACCESS_WRITE);
1306		if (ret) {
1307			mlog_errno(ret);
1308			goto out;
1309		}
1310	}
1311
1312	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1313				    index);
1314
1315	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1316				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1317	if (ret) {
1318		mlog_errno(ret);
1319		goto out;
1320	}
1321
1322	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1323	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1324	if (add_to_free_list) {
1325		trailer->db_free_next = dx_root->dr_free_blk;
1326		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1327		ocfs2_journal_dirty(handle, dx_root_bh);
1328	}
1329
1330	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1331	ocfs2_journal_dirty(handle, leaf_bh);
1332
1333	le32_add_cpu(&dx_root->dr_num_entries, -1);
1334	ocfs2_journal_dirty(handle, dx_root_bh);
1335
1336	ocfs2_dx_list_remove_entry(entry_list, index);
1337
1338	if (!ocfs2_dx_root_inline(dx_root))
1339		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1340
1341out:
1342	return ret;
1343}
1344
1345static inline int ocfs2_delete_entry_id(handle_t *handle,
1346					struct inode *dir,
1347					struct ocfs2_dir_entry *de_del,
1348					struct buffer_head *bh)
1349{
1350	int ret;
1351	struct buffer_head *di_bh = NULL;
1352	struct ocfs2_dinode *di;
1353	struct ocfs2_inline_data *data;
1354
1355	ret = ocfs2_read_inode_block(dir, &di_bh);
1356	if (ret) {
1357		mlog_errno(ret);
1358		goto out;
1359	}
1360
1361	di = (struct ocfs2_dinode *)di_bh->b_data;
1362	data = &di->id2.i_data;
1363
1364	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1365				   i_size_read(dir));
1366
1367	brelse(di_bh);
1368out:
1369	return ret;
1370}
1371
1372static inline int ocfs2_delete_entry_el(handle_t *handle,
1373					struct inode *dir,
1374					struct ocfs2_dir_entry *de_del,
1375					struct buffer_head *bh)
1376{
1377	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1378				    bh->b_size);
1379}
1380
1381/*
1382 * Delete a directory entry. Hide the details of directory
1383 * implementation from the caller.
1384 */
1385int ocfs2_delete_entry(handle_t *handle,
1386		       struct inode *dir,
1387		       struct ocfs2_dir_lookup_result *res)
1388{
1389	if (ocfs2_dir_indexed(dir))
1390		return ocfs2_delete_entry_dx(handle, dir, res);
1391
1392	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1393		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1394					     res->dl_leaf_bh);
1395
1396	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1397				     res->dl_leaf_bh);
1398}
1399
1400/*
1401 * Check whether 'de' has enough room to hold an entry of
1402 * 'new_rec_len' bytes.
1403 */
1404static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1405					 unsigned int new_rec_len)
1406{
1407	unsigned int de_really_used;
1408
1409	/* Check whether this is an empty record with enough space */
1410	if (le64_to_cpu(de->inode) == 0 &&
1411	    le16_to_cpu(de->rec_len) >= new_rec_len)
1412		return 1;
1413
1414	/*
1415	 * Record might have free space at the end which we can
1416	 * use.
1417	 */
1418	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1419	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1420		return 1;
1421
1422	return 0;
1423}
1424
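/*
 * The two helpers below append a hashed entry to a dx entry list.
 * ocfs2_dx_dir_leaf_insert_tail() copies an already-built entry onto
 * the end of a leaf's list; ocfs2_dx_entry_list_insert() builds the
 * entry in place from the name hash and the block holding the
 * unindexed dirent. Callers must have verified that a free slot
 * exists and reserved any journal access needed.
 */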
1425static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1426					  struct ocfs2_dx_entry *dx_new_entry)
1427{
1428	int i;
1429
1430	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1431	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1432
1433	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1434}
1435
1436static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1437				       struct ocfs2_dx_hinfo *hinfo,
1438				       u64 dirent_blk)
1439{
1440	int i;
1441	struct ocfs2_dx_entry *dx_entry;
1442
1443	i = le16_to_cpu(entry_list->de_num_used);
1444	dx_entry = &entry_list->de_entries[i];
1445
1446	memset(dx_entry, 0, sizeof(*dx_entry));
1447	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1448	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1449	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1450
1451	le16_add_cpu(&entry_list->de_num_used, 1);
1452}
1453
1454static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1455				      struct ocfs2_dx_hinfo *hinfo,
1456				      u64 dirent_blk,
1457				      struct buffer_head *dx_leaf_bh)
1458{
1459	int ret;
1460	struct ocfs2_dx_leaf *dx_leaf;
1461
1462	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1463				      OCFS2_JOURNAL_ACCESS_WRITE);
1464	if (ret) {
1465		mlog_errno(ret);
1466		goto out;
1467	}
1468
1469	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1470	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1471	ocfs2_journal_dirty(handle, dx_leaf_bh);
1472
1473out:
1474	return ret;
1475}
1476
1477static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1478					struct ocfs2_dx_hinfo *hinfo,
1479					u64 dirent_blk,
1480					struct ocfs2_dx_root_block *dx_root)
1481{
1482	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1483}
1484
1485static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1486			       struct ocfs2_dir_lookup_result *lookup)
1487{
1488	int ret = 0;
1489	struct ocfs2_dx_root_block *dx_root;
1490	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1491
1492	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1493				      OCFS2_JOURNAL_ACCESS_WRITE);
1494	if (ret) {
1495		mlog_errno(ret);
1496		goto out;
1497	}
1498
1499	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1500	if (ocfs2_dx_root_inline(dx_root)) {
1501		ocfs2_dx_inline_root_insert(dir, handle,
1502					    &lookup->dl_hinfo,
1503					    lookup->dl_leaf_bh->b_blocknr,
1504					    dx_root);
1505	} else {
1506		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1507						 lookup->dl_leaf_bh->b_blocknr,
1508						 lookup->dl_dx_leaf_bh);
1509		if (ret)
1510			goto out;
1511	}
1512
1513	le32_add_cpu(&dx_root->dr_num_entries, 1);
1514	ocfs2_journal_dirty(handle, dx_root_bh);
1515
1516out:
1517	return ret;
1518}
1519
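/*
 * Unlink dl_leaf_bh from the directory's free list. The previous
 * list element - either the dx root's dr_free_blk head or the
 * trailer of dl_prev_leaf_bh - is pointed at our successor, and our
 * own trailer's free-space fields are cleared. Journal access to
 * both buffers must already have been reserved by the caller.
 */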
1520static void ocfs2_remove_block_from_free_list(struct inode *dir,
1521				       handle_t *handle,
1522				       struct ocfs2_dir_lookup_result *lookup)
1523{
1524	struct ocfs2_dir_block_trailer *trailer, *prev;
1525	struct ocfs2_dx_root_block *dx_root;
1526	struct buffer_head *bh;
1527
1528	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1529
1530	if (ocfs2_free_list_at_root(lookup)) {
1531		bh = lookup->dl_dx_root_bh;
1532		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1533		dx_root->dr_free_blk = trailer->db_free_next;
1534	} else {
1535		bh = lookup->dl_prev_leaf_bh;
1536		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1537		prev->db_free_next = trailer->db_free_next;
1538	}
1539
1540	trailer->db_free_rec_len = cpu_to_le16(0);
1541	trailer->db_free_next = cpu_to_le64(0);
1542
1543	ocfs2_journal_dirty(handle, bh);
1544	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1545}
1546
1547/*
1548 * This expects that a journal write has been reserved on
1549 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1550 */
1551static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1552				   struct ocfs2_dir_lookup_result *lookup)
1553{
1554	int max_rec_len;
1555	struct ocfs2_dir_block_trailer *trailer;
1556
1557	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1558	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1559	if (max_rec_len) {
1560		/*
1561		 * There's still room in this block, so no need to remove it
1562		 * from the free list. In this case, we just want to update
1563		 * the rec len accounting.
1564		 */
1565		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1566		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1567		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1568	} else {
1569		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1570	}
1571}
1572
1573/* We don't always have a dentry for what we want to add, so callers
1574 * like the orphan dir code can call this directly.
1575 *
1576 * The lookup context must have been filled from
1577 * ocfs2_prepare_dir_for_insert.
1578 */
1579int __ocfs2_add_entry(handle_t *handle,
1580		      struct inode *dir,
1581		      const char *name, int namelen,
1582		      struct inode *inode, u64 blkno,
1583		      struct buffer_head *parent_fe_bh,
1584		      struct ocfs2_dir_lookup_result *lookup)
1585{
1586	unsigned long offset;
1587	unsigned short rec_len;
1588	struct ocfs2_dir_entry *de, *de1;
1589	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1590	struct super_block *sb = dir->i_sb;
1591	int retval;
1592	unsigned int size = sb->s_blocksize;
1593	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1594	char *data_start = insert_bh->b_data;
1595
1596	if (!namelen)
1597		return -EINVAL;
1598
1599	if (ocfs2_dir_indexed(dir)) {
1600		struct buffer_head *bh;
1601
1602		/*
1603		 * An indexed dir may require that we update the free space
1604		 * list. Reserve a write to the previous node in the list so
1605		 * that we don't fail later.
1606		 *
1607		 * XXX: This can be either a dx_root_block, or an unindexed
1608		 * directory tree leaf block.
1609		 */
1610		if (ocfs2_free_list_at_root(lookup)) {
1611			bh = lookup->dl_dx_root_bh;
1612			retval = ocfs2_journal_access_dr(handle,
1613						 INODE_CACHE(dir), bh,
1614						 OCFS2_JOURNAL_ACCESS_WRITE);
1615		} else {
1616			bh = lookup->dl_prev_leaf_bh;
1617			retval = ocfs2_journal_access_db(handle,
1618						 INODE_CACHE(dir), bh,
1619						 OCFS2_JOURNAL_ACCESS_WRITE);
1620		}
1621		if (retval) {
1622			mlog_errno(retval);
1623			return retval;
1624		}
1625	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1626		data_start = di->id2.i_data.id_data;
1627		size = i_size_read(dir);
1628
1629		BUG_ON(insert_bh != parent_fe_bh);
1630	}
1631
1632	rec_len = OCFS2_DIR_REC_LEN(namelen);
1633	offset = 0;
1634	de = (struct ocfs2_dir_entry *) data_start;
1635	while (1) {
1636		BUG_ON((char *)de >= (size + data_start));
1637
1638		/* These checks should've already been passed by the
1639		 * prepare function, but I guess we can leave them
1640		 * here anyway. */
1641		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
1642			retval = -ENOENT;
1643			goto bail;
1644		}
1645		if (ocfs2_match(namelen, name, de)) {
1646			retval = -EEXIST;
1647			goto bail;
1648		}
1649
1650		/* We're guaranteed that we should have space, so we
1651		 * can't possibly have hit the trailer...right? */
1652		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1653				"Hit dir trailer trying to insert %.*s "
1654			        "(namelen %d) into directory %llu.  "
1655				"offset is %lu, trailer offset is %d\n",
1656				namelen, name, namelen,
1657				(unsigned long long)parent_fe_bh->b_blocknr,
1658				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1659
1660		if (ocfs2_dirent_would_fit(de, rec_len)) {
1661			dir->i_mtime = dir->i_ctime = current_time(dir);
1662			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1663			if (retval < 0) {
1664				mlog_errno(retval);
1665				goto bail;
1666			}
1667
1668			if (insert_bh == parent_fe_bh)
1669				retval = ocfs2_journal_access_di(handle,
1670								 INODE_CACHE(dir),
1671								 insert_bh,
1672								 OCFS2_JOURNAL_ACCESS_WRITE);
1673			else {
1674				retval = ocfs2_journal_access_db(handle,
1675								 INODE_CACHE(dir),
1676								 insert_bh,
1677					      OCFS2_JOURNAL_ACCESS_WRITE);
1678
1679				if (!retval && ocfs2_dir_indexed(dir))
1680					retval = ocfs2_dx_dir_insert(dir,
1681								handle,
1682								lookup);
1683			}
1684
1685			if (retval) {
1686				mlog_errno(retval);
1687				goto bail;
1688			}
1689
1690			/* By now the buffer is marked for journaling */
1691			offset += le16_to_cpu(de->rec_len);
1692			if (le64_to_cpu(de->inode)) {
1693				de1 = (struct ocfs2_dir_entry *)((char *) de +
1694					OCFS2_DIR_REC_LEN(de->name_len));
1695				de1->rec_len =
1696					cpu_to_le16(le16_to_cpu(de->rec_len) -
1697					OCFS2_DIR_REC_LEN(de->name_len));
1698				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1699				de = de1;
1700			}
1701			de->file_type = FT_UNKNOWN;
1702			if (blkno) {
1703				de->inode = cpu_to_le64(blkno);
1704				ocfs2_set_de_type(de, inode->i_mode);
1705			} else
1706				de->inode = 0;
1707			de->name_len = namelen;
1708			memcpy(de->name, name, namelen);
1709
1710			if (ocfs2_dir_indexed(dir))
1711				ocfs2_recalc_free_list(dir, handle, lookup);
1712
1713			inode_inc_iversion(dir);
1714			ocfs2_journal_dirty(handle, insert_bh);
1715			retval = 0;
1716			goto bail;
1717		}
1718
1719		offset += le16_to_cpu(de->rec_len);
1720		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1721	}
1722
1723	/* when you think about it, the assert above should prevent us
1724	 * from ever getting here. */
1725	retval = -ENOSPC;
1726bail:
1727	if (retval)
1728		mlog_errno(retval);
1729
1730	return retval;
1731}
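/*
 * Rough usage sketch (error handling and exact signatures elided -
 * see the real call sites in namei.c):
 *
 *	ocfs2_prepare_dir_for_insert(osb, dir, dir_bh, name, len, &lookup);
 *	handle = ocfs2_start_trans(osb, credits);
 *	__ocfs2_add_entry(handle, dir, name, len, inode, blkno,
 *			  dir_bh, &lookup);
 *
 * The lookup result carries the leaf (and, for indexed directories,
 * the dx root/leaf) buffers that the insert path journals above.
 */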
1732
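/*
 * Iterate an inline-data directory straight out of the inode block,
 * emitting each live dirent via dir_emit(). If the directory changed
 * since the last call (iversion mismatch), re-scan from the start of
 * the inline area so ctx->pos lands on a valid record boundary again.
 */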
1733static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1734				    u64 *f_version,
1735				    struct dir_context *ctx)
1736{
1737	int ret, i;
1738	unsigned long offset = ctx->pos;
1739	struct buffer_head *di_bh = NULL;
1740	struct ocfs2_dinode *di;
1741	struct ocfs2_inline_data *data;
1742	struct ocfs2_dir_entry *de;
1743
1744	ret = ocfs2_read_inode_block(inode, &di_bh);
1745	if (ret) {
1746		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1747		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1748		goto out;
1749	}
1750
1751	di = (struct ocfs2_dinode *)di_bh->b_data;
1752	data = &di->id2.i_data;
1753
1754	while (ctx->pos < i_size_read(inode)) {
1755		/* If the dir block has changed since the last call to
1756		 * readdir(2), then we might be pointing to an invalid
1757		 * dirent right now.  Scan from the start of the block
1758		 * to make sure. */
1759		if (!inode_eq_iversion(inode, *f_version)) {
1760			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1761				de = (struct ocfs2_dir_entry *)
1762					(data->id_data + i);
1763				/* It's too expensive to do a full
1764				 * dirent test each time round this
1765				 * loop, but we do have to test at
1766				 * least that it is non-zero.  A
1767				 * failure will be detected in the
1768				 * dirent test below. */
1769				if (le16_to_cpu(de->rec_len) <
1770				    OCFS2_DIR_REC_LEN(1))
1771					break;
1772				i += le16_to_cpu(de->rec_len);
1773			}
1774			ctx->pos = offset = i;
1775			*f_version = inode_query_iversion(inode);
1776		}
1777
1778		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1779		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
1780			/* On error, skip the f_pos to the end. */
1781			ctx->pos = i_size_read(inode);
1782			break;
1783		}
1784		offset += le16_to_cpu(de->rec_len);
1785		if (le64_to_cpu(de->inode)) {
1786			if (!dir_emit(ctx, de->name, de->name_len,
1787				      le64_to_cpu(de->inode),
1788				      fs_ftype_to_dtype(de->file_type)))
1789				goto out;
1790		}
1791		ctx->pos += le16_to_cpu(de->rec_len);
1792	}
1793out:
1794	brelse(di_bh);
1795	return 0;
1796}
1797
1798/*
1799 * NOTE: This function can be called against unindexed directories,
1800 * and indexed ones.
1801 */
1802static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1803				    u64 *f_version,
1804				    struct dir_context *ctx,
1805				    bool persist)
1806{
1807	unsigned long offset, blk, last_ra_blk = 0;
1808	int i;
1809	struct buffer_head * bh, * tmp;
1810	struct ocfs2_dir_entry * de;
1811	struct super_block * sb = inode->i_sb;
1812	unsigned int ra_sectors = 16;
1813	int stored = 0;
1814
1815	bh = NULL;
1816
1817	offset = ctx->pos & (sb->s_blocksize - 1);
1818
1819	while (ctx->pos < i_size_read(inode)) {
1820		blk = ctx->pos >> sb->s_blocksize_bits;
1821		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1822			/* Skip the corrupt dirblock and keep trying */
1823			ctx->pos += sb->s_blocksize - offset;
1824			continue;
1825		}
1826
1827		/* The idea here is to begin with 8k read-ahead and to stay
1828		 * 4k ahead of our current position.
1829		 *
1830		 * TODO: Use the pagecache for this. We just need to
1831		 * make sure it's cluster-safe... */
1832		if (!last_ra_blk
1833		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1834			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1835			     i > 0; i--) {
1836				tmp = NULL;
1837				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1838							  OCFS2_BH_READAHEAD))
1839					brelse(tmp);
1840			}
1841			last_ra_blk = blk;
1842			ra_sectors = 8;
1843		}
1844
1845		/* If the dir block has changed since the last call to
1846		 * readdir(2), then we might be pointing to an invalid
1847		 * dirent right now.  Scan from the start of the block
1848		 * to make sure. */
1849		if (!inode_eq_iversion(inode, *f_version)) {
1850			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1851				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1852				/* It's too expensive to do a full
1853				 * dirent test each time round this
1854				 * loop, but we do have to test at
1855				 * least that it is non-zero.  A
1856				 * failure will be detected in the
1857				 * dirent test below. */
1858				if (le16_to_cpu(de->rec_len) <
1859				    OCFS2_DIR_REC_LEN(1))
1860					break;
1861				i += le16_to_cpu(de->rec_len);
1862			}
1863			offset = i;
1864			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1865				| offset;
1866			*f_version = inode_query_iversion(inode);
1867		}
1868
1869		while (ctx->pos < i_size_read(inode)
1870		       && offset < sb->s_blocksize) {
1871			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1872			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
1873				/* On error, skip the f_pos to the
1874				   next block. */
1875				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1876				break;
1877			}
1878			if (le64_to_cpu(de->inode)) {
1879				if (!dir_emit(ctx, de->name,
1880						de->name_len,
1881						le64_to_cpu(de->inode),
1882					fs_ftype_to_dtype(de->file_type))) {
1883					brelse(bh);
1884					return 0;
1885				}
1886				stored++;
1887			}
1888			offset += le16_to_cpu(de->rec_len);
1889			ctx->pos += le16_to_cpu(de->rec_len);
1890		}
1891		offset = 0;
1892		brelse(bh);
1893		bh = NULL;
1894		if (!persist && stored)
1895			break;
1896	}
1897	return 0;
1898}
1899
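/*
 * Dispatch to the inline-data or extent-list iterator. 'persist'
 * tells the extent iterator to keep going after the first block that
 * produced entries - ocfs2_dir_foreach() wants everything in one go,
 * while ocfs2_readdir() can rely on the VFS calling back in.
 */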
1900static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1901				 struct dir_context *ctx,
1902				 bool persist)
1903{
1904	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1905		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1906	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1907}
1908
1909/*
1910 * This is intended to be called from inside other kernel functions,
1911 * so we fake some arguments.
1912 */
1913int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1914{
1915	u64 version = inode_query_iversion(inode);
1916	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1917	return 0;
1918}
1919
1920/*
1921 * ocfs2_readdir()
1922 *
1923 */
1924int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1925{
1926	int error = 0;
1927	struct inode *inode = file_inode(file);
1928	int lock_level = 0;
1929
1930	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1931
1932	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
1933	if (lock_level && error >= 0) {
1934		/* Release the EX lock that was taken to update atime,
1935		 * and take a PR lock again to reduce contention
1936		 * on commonly accessed directories. */
1937		ocfs2_inode_unlock(inode, 1);
1938		lock_level = 0;
1939		error = ocfs2_inode_lock(inode, NULL, 0);
1940	}
1941	if (error < 0) {
1942		if (error != -ENOENT)
1943			mlog_errno(error);
1944		/* we haven't got any yet, so propagate the error. */
1945		goto bail_nolock;
1946	}
1947
1948	error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
1949
1950	ocfs2_inode_unlock(inode, lock_level);
1951	if (error)
1952		mlog_errno(error);
1953
1954bail_nolock:
1955
1956	return error;
1957}
1958
1959/*
1960 * NOTE: this should always be called with parent dir i_rwsem taken.
1961 */
1962int ocfs2_find_files_on_disk(const char *name,
1963			     int namelen,
1964			     u64 *blkno,
1965			     struct inode *inode,
1966			     struct ocfs2_dir_lookup_result *lookup)
1967{
1968	int status = -ENOENT;
1969
1970	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1971				(unsigned long long)OCFS2_I(inode)->ip_blkno);
1972
1973	status = ocfs2_find_entry(name, namelen, inode, lookup);
1974	if (status)
1975		goto leave;
1976
1977	*blkno = le64_to_cpu(lookup->dl_entry->inode);
1978
1979	status = 0;
1980leave:
1981
1982	return status;
1983}
1984
1985/*
1986 * Convenience function for callers which just want the block number
1987 * mapped to a name and don't require the full dirent info, etc.
1988 */
1989int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
1990			       int namelen, u64 *blkno)
1991{
1992	int ret;
1993	struct ocfs2_dir_lookup_result lookup = { NULL, };
1994
1995	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
1996	ocfs2_free_dir_lookup_result(&lookup);
1997
1998	return ret;
1999}
2000
2001/* Check for a name within a directory.
2002 *
2003 * Return 0 if the name does not exist
2004 * Return -EEXIST if the directory contains the name
2005 *
2006 * Callers should have i_rwsem + a cluster lock on dir
2007 */
2008int ocfs2_check_dir_for_entry(struct inode *dir,
2009			      const char *name,
2010			      int namelen)
2011{
2012	int ret = 0;
2013	struct ocfs2_dir_lookup_result lookup = { NULL, };
2014
2015	trace_ocfs2_check_dir_for_entry(
2016		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2017
2018	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
2019		ret = -EEXIST;
2020		mlog_errno(ret);
2021	}
2022
2023	ocfs2_free_dir_lookup_result(&lookup);
2024
2025	return ret;
2026}
2027
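/*
 * rmdir emptiness check: a private dir_context actor which records
 * whether ".", ".." or anything else was emitted during the scan.
 */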
2028struct ocfs2_empty_dir_priv {
2029	struct dir_context ctx;
2030	unsigned seen_dot;
2031	unsigned seen_dot_dot;
2032	unsigned seen_other;
2033	unsigned dx_dir;
2034};
2035static bool ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2036				   int name_len, loff_t pos, u64 ino,
2037				   unsigned type)
2038{
2039	struct ocfs2_empty_dir_priv *p =
2040		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2041
2042	/*
2043	 * Check the positions of "." and ".." records to be sure
2044	 * they're in the correct place.
2045	 *
2046	 * Indexed directories don't need to proceed past the first
2047	 * two entries, so we end the scan after seeing '..'. Despite
2048	 * that, we allow the scan to proceed in the event that we
2049	 * have a corrupted indexed directory (no dot or dot dot
2050	 * entries). This allows us to double check for existing
2051	 * entries which might not have been found in the index.
2052	 */
2053	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2054		p->seen_dot = 1;
2055		return true;
2056	}
2057
2058	if (name_len == 2 && !strncmp("..", name, 2) &&
2059	    pos == OCFS2_DIR_REC_LEN(1)) {
2060		p->seen_dot_dot = 1;
2061
2062		if (p->dx_dir && p->seen_dot)
2063			return false;
2064
2065		return true;
2066	}
2067
2068	p->seen_other = 1;
2069	return false;
2070}
2071
2072static int ocfs2_empty_dir_dx(struct inode *inode,
2073			      struct ocfs2_empty_dir_priv *priv)
2074{
2075	int ret;
2076	struct buffer_head *di_bh = NULL;
2077	struct buffer_head *dx_root_bh = NULL;
2078	struct ocfs2_dinode *di;
2079	struct ocfs2_dx_root_block *dx_root;
2080
2081	priv->dx_dir = 1;
2082
2083	ret = ocfs2_read_inode_block(inode, &di_bh);
2084	if (ret) {
2085		mlog_errno(ret);
2086		goto out;
2087	}
2088	di = (struct ocfs2_dinode *)di_bh->b_data;
2089
2090	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2091	if (ret) {
2092		mlog_errno(ret);
2093		goto out;
2094	}
2095	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2096
2097	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2098		priv->seen_other = 1;
2099
2100out:
2101	brelse(di_bh);
2102	brelse(dx_root_bh);
2103	return ret;
2104}
2105
2106/*
2107 * routine to check that the specified directory is empty (for rmdir)
2108 *
2109 * Returns 1 if dir is empty, zero otherwise.
2110 *
2111 * XXX: This is a performance problem for unindexed directories.
2112 */
2113int ocfs2_empty_dir(struct inode *inode)
2114{
2115	int ret;
2116	struct ocfs2_empty_dir_priv priv = {
2117		.ctx.actor = ocfs2_empty_dir_filldir,
2118	};
2119
2120	if (ocfs2_dir_indexed(inode)) {
2121		ret = ocfs2_empty_dir_dx(inode, &priv);
2122		if (ret)
2123			mlog_errno(ret);
2124		/*
2125		 * We still run ocfs2_dir_foreach to get the checks
2126		 * for "." and "..".
2127		 */
2128	}
2129
2130	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2131	if (ret)
2132		mlog_errno(ret);
2133
2134	if (!priv.seen_dot || !priv.seen_dot_dot) {
2135		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2136		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2137		/*
2138		 * XXX: Is it really safe to allow an unlink to continue?
2139		 */
2140		return 1;
2141	}
2142
2143	return !priv.seen_other;
2144}
2145
2146/*
2147 * Fills "." and ".." dirents in a new directory block. Returns dirent for
2148 * "..", which might be used during creation of a directory with a trailing
2149 * header. It is otherwise safe to ignore the return code.
2150 */
2151static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2152							  struct inode *parent,
2153							  char *start,
2154							  unsigned int size)
2155{
2156	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2157
2158	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2159	de->name_len = 1;
2160	de->rec_len =
2161		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2162	strcpy(de->name, ".");
2163	ocfs2_set_de_type(de, S_IFDIR);
2164
2165	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2166	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2167	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2168	de->name_len = 2;
2169	strcpy(de->name, "..");
2170	ocfs2_set_de_type(de, S_IFDIR);
2171
2172	return de;
2173}
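/*
 * Illustrative layout of the freshly filled area of 'size' bytes
 * (".." absorbs all remaining space, which the trailer setup and
 * ocfs2_expand_last_dirent() later rely on):
 *
 *	[ "."  rec_len = OCFS2_DIR_REC_LEN(1)        ]
 *	[ ".." rec_len = size - OCFS2_DIR_REC_LEN(1) ]
 */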
2174
2175/*
2176 * This works together with code in ocfs2_mknod_locked() which sets
2177 * the inline-data flag and initializes the inline-data section.
2178 */
2179static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2180				 handle_t *handle,
2181				 struct inode *parent,
2182				 struct inode *inode,
2183				 struct buffer_head *di_bh)
2184{
2185	int ret;
2186	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2187	struct ocfs2_inline_data *data = &di->id2.i_data;
2188	unsigned int size = le16_to_cpu(data->id_count);
2189
2190	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2191				      OCFS2_JOURNAL_ACCESS_WRITE);
2192	if (ret) {
2193		mlog_errno(ret);
2194		goto out;
2195	}
2196
2197	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2198	ocfs2_journal_dirty(handle, di_bh);
2199
2200	i_size_write(inode, size);
2201	set_nlink(inode, 2);
2202	inode->i_blocks = ocfs2_inode_sector_count(inode);
2203
2204	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2205	if (ret < 0)
2206		mlog_errno(ret);
2207
2208out:
2209	return ret;
2210}
2211
2212static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2213				 handle_t *handle,
2214				 struct inode *parent,
2215				 struct inode *inode,
2216				 struct buffer_head *fe_bh,
2217				 struct ocfs2_alloc_context *data_ac,
2218				 struct buffer_head **ret_new_bh)
2219{
2220	int status;
2221	unsigned int size = osb->sb->s_blocksize;
2222	struct buffer_head *new_bh = NULL;
2223	struct ocfs2_dir_entry *de;
2224
2225	if (ocfs2_new_dir_wants_trailer(inode))
2226		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2227
2228	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2229				     data_ac, NULL, &new_bh);
2230	if (status < 0) {
2231		mlog_errno(status);
2232		goto bail;
2233	}
2234
2235	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2236
2237	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2238					 OCFS2_JOURNAL_ACCESS_CREATE);
2239	if (status < 0) {
2240		mlog_errno(status);
2241		goto bail;
2242	}
2243	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2244
2245	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2246	if (ocfs2_new_dir_wants_trailer(inode)) {
2247		int size = le16_to_cpu(de->rec_len);
2248
2249		/*
2250		 * Figure out the size of the hole left over after
2251		 * insertion of '.' and '..'. The trailer wants this
2252		 * information.
2253		 */
2254		size -= OCFS2_DIR_REC_LEN(2);
2255		size -= sizeof(struct ocfs2_dir_block_trailer);
2256
2257		ocfs2_init_dir_trailer(inode, new_bh, size);
2258	}
2259
2260	ocfs2_journal_dirty(handle, new_bh);
2261
2262	i_size_write(inode, inode->i_sb->s_blocksize);
2263	set_nlink(inode, 2);
2264	inode->i_blocks = ocfs2_inode_sector_count(inode);
2265	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2266	if (status < 0) {
2267		mlog_errno(status);
2268		goto bail;
2269	}
2270
2271	status = 0;
2272	if (ret_new_bh) {
2273		*ret_new_bh = new_bh;
2274		new_bh = NULL;
2275	}
2276bail:
2277	brelse(new_bh);
2278
2279	return status;
2280}
2281
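/*
 * Allocate and initialize an index root block for 'dir', then point
 * the inode at it (di->i_dx_root) and set OCFS2_INDEXED_DIR_FL. With
 * dx_inline the root carries the entry list itself; otherwise it is
 * set up with an empty extent list for dx leaf clusters. dirdata_bh
 * seeds the free list head if its trailer still shows room.
 */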
2282static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2283				     handle_t *handle, struct inode *dir,
2284				     struct buffer_head *di_bh,
2285				     struct buffer_head *dirdata_bh,
2286				     struct ocfs2_alloc_context *meta_ac,
2287				     int dx_inline, u32 num_entries,
2288				     struct buffer_head **ret_dx_root_bh)
2289{
2290	int ret;
2291	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2292	u16 dr_suballoc_bit;
2293	u64 suballoc_loc, dr_blkno;
2294	unsigned int num_bits;
2295	struct buffer_head *dx_root_bh = NULL;
2296	struct ocfs2_dx_root_block *dx_root;
2297	struct ocfs2_dir_block_trailer *trailer =
2298		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2299
2300	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2301				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2302	if (ret) {
2303		mlog_errno(ret);
2304		goto out;
2305	}
2306
2307	trace_ocfs2_dx_dir_attach_index(
2308				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2309				(unsigned long long)dr_blkno);
2310
2311	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2312	if (dx_root_bh == NULL) {
2313		ret = -ENOMEM;
2314		goto out;
2315	}
2316	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2317
2318	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2319				      OCFS2_JOURNAL_ACCESS_CREATE);
2320	if (ret < 0) {
2321		mlog_errno(ret);
2322		goto out;
2323	}
2324
2325	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2326	memset(dx_root, 0, osb->sb->s_blocksize);
2327	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2328	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2329	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2330	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2331	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2332	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2333	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2334	dx_root->dr_num_entries = cpu_to_le32(num_entries);
2335	if (le16_to_cpu(trailer->db_free_rec_len))
2336		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2337	else
2338		dx_root->dr_free_blk = cpu_to_le64(0);
2339
2340	if (dx_inline) {
2341		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2342		dx_root->dr_entries.de_count =
2343			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2344	} else {
2345		dx_root->dr_list.l_count =
2346			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2347	}
2348	ocfs2_journal_dirty(handle, dx_root_bh);
2349
2350	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2351				      OCFS2_JOURNAL_ACCESS_CREATE);
2352	if (ret) {
2353		mlog_errno(ret);
2354		goto out;
2355	}
2356
2357	di->i_dx_root = cpu_to_le64(dr_blkno);
2358
2359	spin_lock(&OCFS2_I(dir)->ip_lock);
2360	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2361	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2362	spin_unlock(&OCFS2_I(dir)->ip_lock);
2363
2364	ocfs2_journal_dirty(handle, di_bh);
2365
2366	*ret_dx_root_bh = dx_root_bh;
2367	dx_root_bh = NULL;
2368
2369out:
2370	brelse(dx_root_bh);
2371	return ret;
2372}
2373
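/*
 * Initialize every block of a newly claimed index cluster as an empty
 * dx leaf: signature, fs generation, its own block number and an
 * empty entry list sized to the block.
 */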
2374static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2375				       handle_t *handle, struct inode *dir,
2376				       struct buffer_head **dx_leaves,
2377				       int num_dx_leaves, u64 start_blk)
2378{
2379	int ret, i;
2380	struct ocfs2_dx_leaf *dx_leaf;
2381	struct buffer_head *bh;
2382
2383	for (i = 0; i < num_dx_leaves; i++) {
2384		bh = sb_getblk(osb->sb, start_blk + i);
2385		if (bh == NULL) {
2386			ret = -ENOMEM;
2387			goto out;
2388		}
2389		dx_leaves[i] = bh;
2390
2391		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2392
2393		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2394					      OCFS2_JOURNAL_ACCESS_CREATE);
2395		if (ret < 0) {
2396			mlog_errno(ret);
2397			goto out;
2398		}
2399
2400		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2401
2402		memset(dx_leaf, 0, osb->sb->s_blocksize);
2403		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2404		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2405		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2406		dx_leaf->dl_list.de_count =
2407			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2408
2409		trace_ocfs2_dx_dir_format_cluster(
2410				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2411				(unsigned long long)bh->b_blocknr,
2412				le16_to_cpu(dx_leaf->dl_list.de_count));
2413
2414		ocfs2_journal_dirty(handle, bh);
2415	}
2416
2417	ret = 0;
2418out:
2419	return ret;
2420}
2421
2422/*
2423 * Allocates and formats a new cluster for use in an indexed dir
2424 * leaf. This version will not do the extent insert, so that it can be
2425 * used by operations which need careful ordering.
2426 */
2427static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2428				      u32 cpos, handle_t *handle,
2429				      struct ocfs2_alloc_context *data_ac,
2430				      struct buffer_head **dx_leaves,
2431				      int num_dx_leaves, u64 *ret_phys_blkno)
2432{
2433	int ret;
2434	u32 phys, num;
2435	u64 phys_blkno;
2436	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2437
2438	/*
2439	 * XXX: For create, this should claim cluster for the index
2440	 * *before* the unindexed insert so that we have a better
2441	 * chance of contiguousness as the directory grows in number
2442	 * of entries.
2443	 */
2444	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2445	if (ret) {
2446		mlog_errno(ret);
2447		goto out;
2448	}
2449
2450	/*
2451	 * Format the new cluster first. That way, we're inserting
2452	 * valid data.
2453	 */
2454	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2455	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2456					  num_dx_leaves, phys_blkno);
2457	if (ret) {
2458		mlog_errno(ret);
2459		goto out;
2460	}
2461
2462	*ret_phys_blkno = phys_blkno;
2463out:
2464	return ret;
2465}
2466
2467static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2468				    struct ocfs2_extent_tree *et,
2469				    u32 cpos, handle_t *handle,
2470				    struct ocfs2_alloc_context *data_ac,
2471				    struct ocfs2_alloc_context *meta_ac,
2472				    struct buffer_head **dx_leaves,
2473				    int num_dx_leaves)
2474{
2475	int ret;
2476	u64 phys_blkno;
2477
2478	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2479					 num_dx_leaves, &phys_blkno);
2480	if (ret) {
2481		mlog_errno(ret);
2482		goto out;
2483	}
2484
2485	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2486				  meta_ac);
2487	if (ret)
2488		mlog_errno(ret);
2489out:
2490	return ret;
2491}
2492
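/*
 * Allocate an array with one buffer_head pointer per block in a
 * cluster, for code paths that format a whole cluster of dx leaves
 * at once.
 */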
2493static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2494							int *ret_num_leaves)
2495{
2496	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2497	struct buffer_head **dx_leaves;
2498
2499	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2500			    GFP_NOFS);
2501	if (dx_leaves && ret_num_leaves)
2502		*ret_num_leaves = num_dx_leaves;
2503
2504	return dx_leaves;
2505}
2506
2507static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2508				 handle_t *handle,
2509				 struct inode *parent,
2510				 struct inode *inode,
2511				 struct buffer_head *di_bh,
2512				 struct ocfs2_alloc_context *data_ac,
2513				 struct ocfs2_alloc_context *meta_ac)
2514{
2515	int ret;
2516	struct buffer_head *leaf_bh = NULL;
2517	struct buffer_head *dx_root_bh = NULL;
2518	struct ocfs2_dx_hinfo hinfo;
2519	struct ocfs2_dx_root_block *dx_root;
2520	struct ocfs2_dx_entry_list *entry_list;
2521
2522	/*
2523	 * Our strategy is to create the directory as though it were
2524	 * unindexed, then add the index block. This works with very
2525	 * little complication since the state of a new directory is a
2526	 * very well known quantity.
2527	 *
2528	 * Essentially, we have two dirents ("." and ".."), in the 1st
2529	 * block which need indexing. These are easily inserted into
2530	 * the index block.
2531	 */
2532
2533	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2534				    data_ac, &leaf_bh);
2535	if (ret) {
2536		mlog_errno(ret);
2537		goto out;
2538	}
2539
2540	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2541					meta_ac, 1, 2, &dx_root_bh);
2542	if (ret) {
2543		mlog_errno(ret);
2544		goto out;
2545	}
2546	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2547	entry_list = &dx_root->dr_entries;
2548
2549	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2550	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2551	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2552
2553	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2554	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2555
2556out:
2557	brelse(dx_root_bh);
2558	brelse(leaf_bh);
2559	return ret;
2560}
2561
2562int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2563		       handle_t *handle,
2564		       struct inode *parent,
2565		       struct inode *inode,
2566		       struct buffer_head *fe_bh,
2567		       struct ocfs2_alloc_context *data_ac,
2568		       struct ocfs2_alloc_context *meta_ac)
2569
2570{
2571	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2572
2573	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2574		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2575
2576	if (ocfs2_supports_indexed_dirs(osb))
2577		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2578					     data_ac, meta_ac);
2579
2580	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2581				     data_ac, NULL);
2582}
2583
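/*
 * Hash every live dirent in the unindexed block 'dirent_bh' and
 * insert it into the matching dx leaf of a freshly formatted index
 * cluster. *num_dx_entries is bumped per name so the caller can set
 * dr_num_entries once the dx root exists.
 */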
2584static int ocfs2_dx_dir_index_block(struct inode *dir,
2585				    handle_t *handle,
2586				    struct buffer_head **dx_leaves,
2587				    int num_dx_leaves,
2588				    u32 *num_dx_entries,
2589				    struct buffer_head *dirent_bh)
2590{
2591	int ret = 0, namelen, i;
2592	char *de_buf, *limit;
2593	struct ocfs2_dir_entry *de;
2594	struct buffer_head *dx_leaf_bh;
2595	struct ocfs2_dx_hinfo hinfo;
2596	u64 dirent_blk = dirent_bh->b_blocknr;
2597
2598	de_buf = dirent_bh->b_data;
2599	limit = de_buf + dir->i_sb->s_blocksize;
2600
2601	while (de_buf < limit) {
2602		de = (struct ocfs2_dir_entry *)de_buf;
2603
2604		namelen = de->name_len;
2605		if (!namelen || !de->inode)
2606			goto inc;
2607
2608		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2609
2610		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2611		dx_leaf_bh = dx_leaves[i];
2612
2613		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2614						 dirent_blk, dx_leaf_bh);
2615		if (ret) {
2616			mlog_errno(ret);
2617			goto out;
2618		}
2619
2620		*num_dx_entries = *num_dx_entries + 1;
2621
2622inc:
2623		de_buf += le16_to_cpu(de->rec_len);
2624	}
2625
2626out:
2627	return ret;
2628}
2629
2630/*
2631 * XXX: This expects dx_root_bh to already be part of the transaction.
2632 */
2633static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2634					 struct buffer_head *dx_root_bh,
2635					 struct buffer_head *dirent_bh)
2636{
2637	char *de_buf, *limit;
2638	struct ocfs2_dx_root_block *dx_root;
2639	struct ocfs2_dir_entry *de;
2640	struct ocfs2_dx_hinfo hinfo;
2641	u64 dirent_blk = dirent_bh->b_blocknr;
2642
2643	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2644
2645	de_buf = dirent_bh->b_data;
2646	limit = de_buf + dir->i_sb->s_blocksize;
2647
2648	while (de_buf < limit) {
2649		de = (struct ocfs2_dir_entry *)de_buf;
2650
2651		if (!de->name_len || !de->inode)
2652			goto inc;
2653
2654		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2655
2656		trace_ocfs2_dx_dir_index_root_block(
2657				(unsigned long long)dir->i_ino,
2658				hinfo.major_hash, hinfo.minor_hash,
2659				de->name_len, de->name,
2660				le16_to_cpu(dx_root->dr_entries.de_num_used));
2661
2662		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2663					   dirent_blk);
2664
2665		le32_add_cpu(&dx_root->dr_num_entries, 1);
2666inc:
2667		de_buf += le16_to_cpu(de->rec_len);
2668	}
2669}
2670
2671/*
2672 * Count the number of inline directory entries in di_bh and compare
2673 * them against the number of entries we can hold in an inline dx root
2674 * block.
2675 */
2676static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2677					 struct buffer_head *di_bh)
2678{
2679	int dirent_count = 0;
2680	char *de_buf, *limit;
2681	struct ocfs2_dir_entry *de;
2682	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2683
2684	de_buf = di->id2.i_data.id_data;
2685	limit = de_buf + i_size_read(dir);
2686
2687	while (de_buf < limit) {
2688		de = (struct ocfs2_dir_entry *)de_buf;
2689
2690		if (de->name_len && de->inode)
2691			dirent_count++;
2692
2693		de_buf += le16_to_cpu(de->rec_len);
2694	}
2695
2696	/* We are careful to leave room for one extra record. */
2697	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2698}
2699
2700/*
2701 * Expand rec_len of the rightmost dirent in a directory block so that it
2702 * contains the end of our valid space for dirents. We do this during
2703 * expansion from an inline directory to one with extents. The first dir block
2704 * in that case is taken from the inline data portion of the inode block.
2705 *
2706 * This will also return the largest amount of contiguous space for a dirent
2707 * in the block. That value is *not* necessarily the last dirent, even after
2708 * expansion. The directory indexing code wants this value for free space
2709 * accounting. We do this here since we're already walking the entire dir
2710 * block.
2711 *
2712 * We add the dir trailer if this filesystem wants it.
2713 */
2714static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2715					     struct inode *dir)
2716{
2717	struct super_block *sb = dir->i_sb;
2718	struct ocfs2_dir_entry *de;
2719	struct ocfs2_dir_entry *prev_de;
2720	char *de_buf, *limit;
2721	unsigned int new_size = sb->s_blocksize;
2722	unsigned int bytes, this_hole;
2723	unsigned int largest_hole = 0;
2724
2725	if (ocfs2_new_dir_wants_trailer(dir))
2726		new_size = ocfs2_dir_trailer_blk_off(sb);
2727
2728	bytes = new_size - old_size;
2729
2730	limit = start + old_size;
2731	de_buf = start;
2732	de = (struct ocfs2_dir_entry *)de_buf;
2733	do {
2734		this_hole = ocfs2_figure_dirent_hole(de);
2735		if (this_hole > largest_hole)
2736			largest_hole = this_hole;
2737
2738		prev_de = de;
2739		de_buf += le16_to_cpu(de->rec_len);
2740		de = (struct ocfs2_dir_entry *)de_buf;
2741	} while (de_buf < limit);
2742
2743	le16_add_cpu(&prev_de->rec_len, bytes);
2744
2745	/* We need to double check this after modification of the final
2746	 * dirent. */
2747	this_hole = ocfs2_figure_dirent_hole(prev_de);
2748	if (this_hole > largest_hole)
2749		largest_hole = this_hole;
2750
2751	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2752		return largest_hole;
2753	return 0;
2754}
2755
2756/*
2757 * We allocate enough clusters to fulfill "blocks_wanted", but set
2758 * i_size to exactly one block. ocfs2_extend_dir() will handle the
2759 * rest automatically for us.
2760 *
2761 * *first_block_bh is a pointer to the 1st data block allocated to the
2762 *  directory.
2763 */
2764static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2765				   unsigned int blocks_wanted,
2766				   struct ocfs2_dir_lookup_result *lookup,
2767				   struct buffer_head **first_block_bh)
2768{
2769	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2770	struct super_block *sb = dir->i_sb;
2771	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2772		credits = ocfs2_inline_to_extents_credits(sb);
2773	u64 dx_insert_blkno, blkno,
2774		bytes = blocks_wanted << sb->s_blocksize_bits;
2775	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2776	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2777	struct ocfs2_alloc_context *data_ac = NULL;
2778	struct ocfs2_alloc_context *meta_ac = NULL;
2779	struct buffer_head *dirdata_bh = NULL;
2780	struct buffer_head *dx_root_bh = NULL;
2781	struct buffer_head **dx_leaves = NULL;
2782	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2783	handle_t *handle;
2784	struct ocfs2_extent_tree et;
2785	struct ocfs2_extent_tree dx_et;
2786	int did_quota = 0, bytes_allocated = 0;
2787
2788	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2789
2790	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2791	dx_alloc = 0;
2792
2793	down_write(&oi->ip_alloc_sem);
2794
2795	if (ocfs2_supports_indexed_dirs(osb)) {
2796		credits += ocfs2_add_dir_index_credits(sb);
2797
2798		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2799		if (!dx_inline) {
2800			/* Add one more cluster for an index leaf */
2801			dx_alloc++;
2802			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2803								&num_dx_leaves);
2804			if (!dx_leaves) {
2805				ret = -ENOMEM;
2806				mlog_errno(ret);
2807				goto out;
2808			}
2809		}
2810
2811		/* This gets us the dx_root */
2812		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2813		if (ret) {
2814			mlog_errno(ret);
2815			goto out;
2816		}
2817	}
2818
2819	/*
2820	 * We should never need more than 2 clusters for the unindexed
2821	 * tree - maximum dirent size is far less than one block. In
2822	 * fact, the only time we'd need more than one cluster is if
2823	 * blocksize == clustersize and the dirent won't fit in the
2824	 * extra space that the expansion to a single block gives. As
2825	 * of today, that only happens on 4k/4k file systems.
2826	 */
2827	BUG_ON(alloc > 2);
2828
2829	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2830	if (ret) {
2831		mlog_errno(ret);
2832		goto out;
2833	}
2834
2835	/*
2836	 * Prepare for worst case allocation scenario of two separate
2837	 * extents in the unindexed tree.
2838	 */
2839	if (alloc == 2)
2840		credits += OCFS2_SUBALLOC_ALLOC;
2841
2842	handle = ocfs2_start_trans(osb, credits);
2843	if (IS_ERR(handle)) {
2844		ret = PTR_ERR(handle);
2845		mlog_errno(ret);
2846		goto out;
2847	}
2848
2849	ret = dquot_alloc_space_nodirty(dir,
2850		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2851	if (ret)
2852		goto out_commit;
2853	did_quota = 1;
2854
2855	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2856		/*
2857		 * Allocate our index cluster first, to maximize the
2858		 * possibility that unindexed leaves grow
2859		 * contiguously.
2860		 */
2861		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2862						 dx_leaves, num_dx_leaves,
2863						 &dx_insert_blkno);
2864		if (ret) {
2865			mlog_errno(ret);
2866			goto out_commit;
2867		}
2868		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2869	}
2870
2871	/*
2872	 * Try to claim as many clusters as the bitmap can give, though
2873	 * if we only get one now, that's enough to continue. The rest
2874	 * will be claimed after the conversion to extents.
2875	 */
2876	if (ocfs2_dir_resv_allowed(osb))
2877		data_ac->ac_resv = &oi->ip_la_data_resv;
2878	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2879	if (ret) {
2880		mlog_errno(ret);
2881		goto out_commit;
2882	}
2883	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2884
2885	/*
2886	 * Operations are carefully ordered so that we set up the new
2887	 * data block first. The conversion from inline data to
2888	 * extents follows.
2889	 */
2890	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2891	dirdata_bh = sb_getblk(sb, blkno);
2892	if (!dirdata_bh) {
2893		ret = -ENOMEM;
2894		mlog_errno(ret);
2895		goto out_commit;
2896	}
2897
2898	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2899
2900	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2901				      OCFS2_JOURNAL_ACCESS_CREATE);
2902	if (ret) {
2903		mlog_errno(ret);
2904		goto out_commit;
2905	}
2906
2907	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2908	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2909	       sb->s_blocksize - i_size_read(dir));
2910	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2911	if (ocfs2_new_dir_wants_trailer(dir)) {
2912		/*
2913		 * Prepare the dir trailer up front. It will otherwise look
2914		 * like a valid dirent. Even if inserting the index fails
2915		 * (unlikely), then all we'll have done is given first dir
2916		 * block a small amount of fragmentation.
2917		 */
2918		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2919	}
2920
2921	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2922	ocfs2_journal_dirty(handle, dirdata_bh);
2923
2924	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2925		/*
2926		 * Dx dirs with an external cluster need to do this up
2927		 * front. Inline dx roots get handled later, after
2928		 * we've allocated our root block. We get passed back
2929		 * a total number of items so that dr_num_entries can
2930		 * be correctly set once the dx_root has been
2931		 * allocated.
2932		 */
2933		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2934					       num_dx_leaves, &num_dx_entries,
2935					       dirdata_bh);
2936		if (ret) {
2937			mlog_errno(ret);
2938			goto out_commit;
2939		}
2940	}
2941
2942	/*
2943	 * Set extent, i_size, etc on the directory. After this, the
2944	 * inode should contain the same exact dirents as before and
2945	 * be fully accessible from system calls.
2946	 *
2947	 * We let the later dirent insert modify c/mtime - to the user
2948	 * the data hasn't changed.
2949	 */
2950	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2951				      OCFS2_JOURNAL_ACCESS_CREATE);
2952	if (ret) {
2953		mlog_errno(ret);
2954		goto out_commit;
2955	}
2956
2957	spin_lock(&oi->ip_lock);
2958	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2959	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2960	spin_unlock(&oi->ip_lock);
2961
2962	ocfs2_dinode_new_extent_list(dir, di);
2963
2964	i_size_write(dir, sb->s_blocksize);
2965	dir->i_mtime = dir->i_ctime = current_time(dir);
2966
2967	di->i_size = cpu_to_le64(sb->s_blocksize);
2968	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
2969	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
2970	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2971
2972	/*
2973	 * This should never fail as our extent list is empty and all
2974	 * related blocks have been journaled already.
2975	 */
2976	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
2977				  0, NULL);
2978	if (ret) {
2979		mlog_errno(ret);
2980		goto out_commit;
2981	}
2982
2983	/*
2984	 * Set i_blocks after the extent insert for the most up to
2985	 * date ip_clusters value.
2986	 */
2987	dir->i_blocks = ocfs2_inode_sector_count(dir);
2988
2989	ocfs2_journal_dirty(handle, di_bh);
2990
2991	if (ocfs2_supports_indexed_dirs(osb)) {
2992		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
2993						dirdata_bh, meta_ac, dx_inline,
2994						num_dx_entries, &dx_root_bh);
2995		if (ret) {
2996			mlog_errno(ret);
2997			goto out_commit;
2998		}
2999
3000		if (dx_inline) {
3001			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3002						      dirdata_bh);
3003		} else {
3004			ocfs2_init_dx_root_extent_tree(&dx_et,
3005						       INODE_CACHE(dir),
3006						       dx_root_bh);
3007			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3008						  dx_insert_blkno, 1, 0, NULL);
3009			if (ret)
3010				mlog_errno(ret);
3011		}
3012	}
3013
3014	/*
3015	 * We asked for two clusters, but only got one in the 1st
3016	 * pass. Claim the 2nd cluster as a separate extent.
3017	 */
3018	if (alloc > len) {
3019		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3020					   &len);
3021		if (ret) {
3022			mlog_errno(ret);
3023			goto out_commit;
3024		}
3025		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3026
3027		ret = ocfs2_insert_extent(handle, &et, 1,
3028					  blkno, len, 0, NULL);
3029		if (ret) {
3030			mlog_errno(ret);
3031			goto out_commit;
3032		}
3033		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3034	}
3035
3036	*first_block_bh = dirdata_bh;
3037	dirdata_bh = NULL;
3038	if (ocfs2_supports_indexed_dirs(osb)) {
3039		unsigned int off;
3040
3041		if (!dx_inline) {
3042			/*
3043			 * We need to return the correct block within the
3044			 * cluster which should hold our entry.
3045			 */
3046			off = ocfs2_dx_dir_hash_idx(osb,
3047						    &lookup->dl_hinfo);
3048			get_bh(dx_leaves[off]);
3049			lookup->dl_dx_leaf_bh = dx_leaves[off];
3050		}
3051		lookup->dl_dx_root_bh = dx_root_bh;
3052		dx_root_bh = NULL;
3053	}
3054
3055out_commit:
3056	if (ret < 0 && did_quota)
3057		dquot_free_space_nodirty(dir, bytes_allocated);
3058
3059	ocfs2_commit_trans(osb, handle);
3060
3061out:
3062	up_write(&oi->ip_alloc_sem);
3063	if (data_ac)
3064		ocfs2_free_alloc_context(data_ac);
3065	if (meta_ac)
3066		ocfs2_free_alloc_context(meta_ac);
3067
3068	if (dx_leaves) {
3069		for (i = 0; i < num_dx_leaves; i++)
3070			brelse(dx_leaves[i]);
3071		kfree(dx_leaves);
3072	}
3073
3074	brelse(dirdata_bh);
3075	brelse(dx_root_bh);
3076
3077	return ret;
3078}
3079
3080/* returns a bh of the 1st new block in the allocation. */
3081static int ocfs2_do_extend_dir(struct super_block *sb,
3082			       handle_t *handle,
3083			       struct inode *dir,
3084			       struct buffer_head *parent_fe_bh,
3085			       struct ocfs2_alloc_context *data_ac,
3086			       struct ocfs2_alloc_context *meta_ac,
3087			       struct buffer_head **new_bh)
3088{
3089	int status;
3090	int extend, did_quota = 0;
3091	u64 p_blkno, v_blkno;
3092
3093	spin_lock(&OCFS2_I(dir)->ip_lock);
3094	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3095	spin_unlock(&OCFS2_I(dir)->ip_lock);
3096
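	/*
	 * Only allocate a new cluster when i_size has caught up with
	 * the space already allocated to the directory; otherwise the
	 * next block is still backed by an existing cluster and we
	 * only need to look up its physical block below.
	 */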
3097	if (extend) {
3098		u32 offset = OCFS2_I(dir)->ip_clusters;
3099
3100		status = dquot_alloc_space_nodirty(dir,
3101					ocfs2_clusters_to_bytes(sb, 1));
3102		if (status)
3103			goto bail;
3104		did_quota = 1;
3105
3106		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3107					      1, 0, parent_fe_bh, handle,
3108					      data_ac, meta_ac, NULL);
3109		BUG_ON(status == -EAGAIN);
3110		if (status < 0) {
3111			mlog_errno(status);
3112			goto bail;
3113		}
3114	}
3115
3116	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3117	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3118	if (status < 0) {
3119		mlog_errno(status);
3120		goto bail;
3121	}
3122
3123	*new_bh = sb_getblk(sb, p_blkno);
3124	if (!*new_bh) {
3125		status = -ENOMEM;
3126		mlog_errno(status);
3127		goto bail;
3128	}
3129	status = 0;
3130bail:
3131	if (did_quota && status < 0)
3132		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3133	return status;
3134}
3135
3136/*
3137 * Assumes you already have a cluster lock on the directory.
3138 *
3139 * 'blocks_wanted' is only used if we have an inline directory which
3140 * is to be turned into an extent based one. The size of the dirent to
3141 * insert might be larger than the space gained by growing to just one
3142 * block, so we may have to grow the inode by two blocks in that case.
3143 *
3144 * If the directory is already indexed, dx_root_bh must be provided.
3145 */
3146static int ocfs2_extend_dir(struct ocfs2_super *osb,
3147			    struct inode *dir,
3148			    struct buffer_head *parent_fe_bh,
3149			    unsigned int blocks_wanted,
3150			    struct ocfs2_dir_lookup_result *lookup,
3151			    struct buffer_head **new_de_bh)
3152{
3153	int status = 0;
3154	int credits, num_free_extents, drop_alloc_sem = 0;
3155	loff_t dir_i_size;
3156	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3157	struct ocfs2_extent_list *el = &fe->id2.i_list;
3158	struct ocfs2_alloc_context *data_ac = NULL;
3159	struct ocfs2_alloc_context *meta_ac = NULL;
3160	handle_t *handle = NULL;
3161	struct buffer_head *new_bh = NULL;
3162	struct ocfs2_dir_entry * de;
3163	struct super_block *sb = osb->sb;
3164	struct ocfs2_extent_tree et;
3165	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3166
3167	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3168		/*
3169		 * This would be a code error as an inline directory should
3170		 * never have an index root.
3171		 */
3172		BUG_ON(dx_root_bh);
3173
3174		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3175						 blocks_wanted, lookup,
3176						 &new_bh);
3177		if (status) {
3178			mlog_errno(status);
3179			goto bail;
3180		}
3181
3182		/* Expansion from inline to an indexed directory will
3183		 * have given us this. */
3184		dx_root_bh = lookup->dl_dx_root_bh;
3185
3186		if (blocks_wanted == 1) {
3187			/*
3188			 * If the new dirent will fit inside the space
3189			 * created by pushing out to one block, then
3190			 * we can complete the operation
3191			 * here. Otherwise we have to expand i_size
3192			 * and format the 2nd block below.
3193			 */
3194			BUG_ON(new_bh == NULL);
3195			goto bail_bh;
3196		}
3197
3198		/*
3199		 * Get rid of 'new_bh' - we want to format the 2nd
3200		 * data block and return that instead.
3201		 */
3202		brelse(new_bh);
3203		new_bh = NULL;
3204
3205		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3206		drop_alloc_sem = 1;
3207		dir_i_size = i_size_read(dir);
3208		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3209		goto do_extend;
3210	}
3211
3212	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3213	drop_alloc_sem = 1;
3214	dir_i_size = i_size_read(dir);
3215	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3216			       dir_i_size);
3217
3218	/* dir->i_size is always block aligned. */
3219	spin_lock(&OCFS2_I(dir)->ip_lock);
3220	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3221		spin_unlock(&OCFS2_I(dir)->ip_lock);
3222		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3223					      parent_fe_bh);
3224		num_free_extents = ocfs2_num_free_extents(&et);
3225		if (num_free_extents < 0) {
3226			status = num_free_extents;
3227			mlog_errno(status);
3228			goto bail;
3229		}
3230
3231		if (!num_free_extents) {
3232			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3233			if (status < 0) {
3234				if (status != -ENOSPC)
3235					mlog_errno(status);
3236				goto bail;
3237			}
3238		}
3239
3240		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3241		if (status < 0) {
3242			if (status != -ENOSPC)
3243				mlog_errno(status);
3244			goto bail;
3245		}
3246
3247		if (ocfs2_dir_resv_allowed(osb))
3248			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3249
3250		credits = ocfs2_calc_extend_credits(sb, el);
3251	} else {
3252		spin_unlock(&OCFS2_I(dir)->ip_lock);
3253		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3254	}
3255
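	/*
	 * Both the inline-expansion path above and the regular path
	 * land here with 'credits' (and, if a fresh cluster is needed,
	 * the allocators) set up. From this point the new block is
	 * obtained, zeroed, and formatted as a single empty dirent
	 * covering all of the usable space in the block.
	 */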
3256do_extend:
3257	if (ocfs2_dir_indexed(dir))
3258		credits++; /* For attaching the new dirent block to the
3259			    * dx_root */
3260
3261	handle = ocfs2_start_trans(osb, credits);
3262	if (IS_ERR(handle)) {
3263		status = PTR_ERR(handle);
3264		handle = NULL;
3265		mlog_errno(status);
3266		goto bail;
3267	}
3268
3269	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3270				     data_ac, meta_ac, &new_bh);
3271	if (status < 0) {
3272		mlog_errno(status);
3273		goto bail;
3274	}
3275
3276	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3277
3278	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3279					 OCFS2_JOURNAL_ACCESS_CREATE);
3280	if (status < 0) {
3281		mlog_errno(status);
3282		goto bail;
3283	}
3284	memset(new_bh->b_data, 0, sb->s_blocksize);
3285
3286	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3287	de->inode = 0;
3288	if (ocfs2_supports_dir_trailer(dir)) {
3289		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3290
3291		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3292
3293		if (ocfs2_dir_indexed(dir)) {
3294			status = ocfs2_dx_dir_link_trailer(dir, handle,
3295							   dx_root_bh, new_bh);
3296			if (status) {
3297				mlog_errno(status);
3298				goto bail;
3299			}
3300		}
3301	} else {
3302		de->rec_len = cpu_to_le16(sb->s_blocksize);
3303	}
3304	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3305	ocfs2_journal_dirty(handle, new_bh);
3306
3307	dir_i_size += dir->i_sb->s_blocksize;
3308	i_size_write(dir, dir_i_size);
3309	dir->i_blocks = ocfs2_inode_sector_count(dir);
3310	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3311	if (status < 0) {
3312		mlog_errno(status);
3313		goto bail;
3314	}
3315
3316bail_bh:
3317	*new_de_bh = new_bh;
3318	get_bh(*new_de_bh);
3319bail:
3320	if (handle)
3321		ocfs2_commit_trans(osb, handle);
3322	if (drop_alloc_sem)
3323		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3324
3325	if (data_ac)
3326		ocfs2_free_alloc_context(data_ac);
3327	if (meta_ac)
3328		ocfs2_free_alloc_context(meta_ac);
3329
3330	brelse(new_bh);
3331
3332	return status;
3333}
3334
3335static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3336				   const char *name, int namelen,
3337				   struct buffer_head **ret_de_bh,
3338				   unsigned int *blocks_wanted)
3339{
3340	int ret;
3341	struct super_block *sb = dir->i_sb;
3342	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3343	struct ocfs2_dir_entry *de, *last_de = NULL;
3344	char *de_buf, *limit;
3345	unsigned long offset = 0;
3346	unsigned int rec_len, new_rec_len, free_space;
3347
3348	/*
3349	 * This calculates how many free bytes we'd have in block zero, should
3350	 * this function force expansion to an extent tree.
3351	 */
3352	if (ocfs2_new_dir_wants_trailer(dir))
3353		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3354	else
3355		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3356
3357	de_buf = di->id2.i_data.id_data;
3358	limit = de_buf + i_size_read(dir);
3359	rec_len = OCFS2_DIR_REC_LEN(namelen);
3360
3361	while (de_buf < limit) {
3362		de = (struct ocfs2_dir_entry *)de_buf;
3363
3364		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
3365			ret = -ENOENT;
3366			goto out;
3367		}
3368		if (ocfs2_match(namelen, name, de)) {
3369			ret = -EEXIST;
3370			goto out;
3371		}
3372		/*
3373		 * No need to check for a trailing dirent record here as
3374		 * they're not used for inline dirs.
3375		 */
3376
3377		if (ocfs2_dirent_would_fit(de, rec_len)) {
3378			/* Ok, we found a spot. Return this bh and let
3379			 * the caller actually fill it in. */
3380			*ret_de_bh = di_bh;
3381			get_bh(*ret_de_bh);
3382			ret = 0;
3383			goto out;
3384		}
3385
3386		last_de = de;
3387		de_buf += le16_to_cpu(de->rec_len);
3388		offset += le16_to_cpu(de->rec_len);
3389	}
3390
3391	/*
3392	 * We're going to require expansion of the directory - figure
3393	 * out how many blocks we'll need so that a place for the
3394	 * dirent can be found.
3395	 */
3396	*blocks_wanted = 1;
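	/*
	 * One block is enough only if the slack at the end of the last
	 * inline dirent, plus the bytes gained by growing to a full
	 * block, can hold the new record; otherwise ask for two.
	 */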
3397	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
3398	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3399		*blocks_wanted = 2;
3400
3401	ret = -ENOSPC;
3402out:
3403	return ret;
3404}
3405
3406static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3407				   int namelen, struct buffer_head **ret_de_bh)
3408{
3409	unsigned long offset;
3410	struct buffer_head *bh = NULL;
3411	unsigned short rec_len;
3412	struct ocfs2_dir_entry *de;
3413	struct super_block *sb = dir->i_sb;
3414	int status;
3415	int blocksize = dir->i_sb->s_blocksize;
3416
3417	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3418	if (status)
3419		goto bail;
3420
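	/*
	 * Linear scan: walk every dirent in every allocated block,
	 * checking both for an existing entry with this name and for a
	 * record with enough slack to hold the new one. Falling off
	 * the end means the caller has to extend the directory.
	 */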
3421	rec_len = OCFS2_DIR_REC_LEN(namelen);
3422	offset = 0;
3423	de = (struct ocfs2_dir_entry *) bh->b_data;
3424	while (1) {
3425		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3426			brelse(bh);
3427			bh = NULL;
3428
3429			if (i_size_read(dir) <= offset) {
3430				/*
3431				 * Caller will have to expand this
3432				 * directory.
3433				 */
3434				status = -ENOSPC;
3435				goto bail;
3436			}
3437			status = ocfs2_read_dir_block(dir,
3438					     offset >> sb->s_blocksize_bits,
3439					     &bh, 0);
3440			if (status)
3441				goto bail;
3442
3443			/* move to next block */
3444			de = (struct ocfs2_dir_entry *) bh->b_data;
3445		}
3446		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
3447			status = -ENOENT;
3448			goto bail;
3449		}
3450		if (ocfs2_match(namelen, name, de)) {
3451			status = -EEXIST;
3452			goto bail;
3453		}
3454
3455		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3456					   blocksize))
3457			goto next;
3458
3459		if (ocfs2_dirent_would_fit(de, rec_len)) {
3460			/* Ok, we found a spot. Return this bh and let
3461			 * the caller actually fill it in. */
3462			*ret_de_bh = bh;
3463			get_bh(*ret_de_bh);
3464			status = 0;
3465			goto bail;
3466		}
3467next:
3468		offset += le16_to_cpu(de->rec_len);
3469		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3470	}
3471
3472bail:
3473	brelse(bh);
3474	if (status)
3475		mlog_errno(status);
3476
3477	return status;
3478}
3479
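/*
 * sort() comparison helper: order ocfs2_dx_entry items by major hash
 * (the value that selects a leaf cluster), breaking ties on minor
 * hash, so that a full leaf can later be split at a single major-hash
 * boundary.
 */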
3480static int dx_leaf_sort_cmp(const void *a, const void *b)
3481{
3482	const struct ocfs2_dx_entry *entry1 = a;
3483	const struct ocfs2_dx_entry *entry2 = b;
3484	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3485	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3486	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3487	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3488
3489	if (major_hash1 > major_hash2)
3490		return 1;
3491	if (major_hash1 < major_hash2)
3492		return -1;
3493
3494	/*
3495	 * It is not strictly necessary to sort by minor hash.
3496	 */
3497	if (minor_hash1 > minor_hash2)
3498		return 1;
3499	if (minor_hash1 < minor_hash2)
3500		return -1;
3501	return 0;
3502}
3503
3504static void dx_leaf_sort_swap(void *a, void *b, int size)
3505{
3506	struct ocfs2_dx_entry *entry1 = a;
3507	struct ocfs2_dx_entry *entry2 = b;
3508
3509	BUG_ON(size != sizeof(*entry1));
3510
3511	swap(*entry1, *entry2);
3512}
3513
3514static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3515{
3516	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3517	int i, num = le16_to_cpu(dl_list->de_num_used);
3518
3519	for (i = 0; i < (num - 1); i++) {
3520		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3521		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3522			return 0;
3523	}
3524
3525	return 1;
3526}
3527
3528/*
3529 * Find the optimal value to split this leaf on. This expects the leaf
3530 * entries to be in sorted order.
3531 *
3532 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3533 * the hash we want to insert.
3534 *
3535 * This function is only concerned with the major hash - that which
3536 * determines which cluster an item belongs to.
3537 */
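/*
 * A worked example (illustrative values only): with leaf_cpos == 100
 * and a sorted leaf holding major hashes {100, 103, 103, 107, 250},
 * the median scan below picks 103 as the split hash; entries hashing
 * at or above 103 are later moved to the new cluster while the rest
 * stay behind.
 */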
3538static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3539					u32 leaf_cpos, u32 insert_hash,
3540					u32 *split_hash)
3541{
3542	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3543	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3544	int allsame;
3545
3546	/*
3547	 * There are a couple of rare, but nasty, corner cases we have to
3548	 * check for here. All of them involve a leaf where all values
3549	 * have the same hash, which is what we look for first.
3550	 *
3551	 * Most of the time, all of the above is false, and we simply
3552	 * pick the median value for a split.
3553	 */
3554	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3555	if (allsame) {
3556		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3557
3558		if (val == insert_hash) {
3559			/*
3560			 * No matter where we would choose to split,
3561			 * the new entry would want to occupy the same
3562			 * block as these. Since there's no space left
3563			 * in their existing block, we know there
3564			 * won't be space after the split.
3565			 */
3566			return -ENOSPC;
3567		}
3568
3569		if (val == leaf_cpos) {
3570			/*
3571			 * Because val is the same as leaf_cpos (which
3572			 * is the smallest value this leaf can have),
3573			 * yet is not equal to insert_hash, we
3574			 * know that insert_hash *must* be larger than
3575			 * val (and leaf_cpos). At least cpos+1 in value.
3576			 *
3577			 * We also know, then, that there cannot be an
3578			 * adjacent extent (otherwise we'd be looking
3579			 * at it). Choosing this value gives us a
3580			 * chance to get some contiguousness.
3581			 */
3582			*split_hash = leaf_cpos + 1;
3583			return 0;
3584		}
3585
3586		if (val > insert_hash) {
3587			/*
3588			 * val cannot be the same as insert_hash, and
3589			 * also must be larger than leaf_cpos. Also,
3590			 * we know that there can't be a leaf between
3591			 * cpos and val, otherwise the entries with
3592			 * hash 'val' would be there.
3593			 */
3594			*split_hash = val;
3595			return 0;
3596		}
3597
3598		*split_hash = insert_hash;
3599		return 0;
3600	}
3601
3602	/*
3603	 * Since the records are sorted and the checks above
3604	 * guaranteed that not all records in this block are the same,
3605	 * we simply travel forward, from the median, and pick the 1st
3606	 * record whose value is larger than leaf_cpos.
3607	 */
3608	for (i = (num_used / 2); i < num_used; i++)
3609		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3610		    leaf_cpos)
3611			break;
3612
3613	BUG_ON(i == num_used); /* Should be impossible */
3614	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3615	return 0;
3616}
3617
3618/*
3619 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3620 * larger than split_hash into new_dx_leaves. We use a temporary
3621 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3622 *
3623 * Since the block offset inside a leaf (cluster) is a constant mask
3624 * of minor_hash, we can optimize: an item at block offset X within
3625 * the original cluster will be at offset X within the new cluster.
3626 */
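/*
 * A sketch of that invariant: with four blocks per cluster, an entry
 * whose minor hash selects block index 2 in the source cluster is
 * reinserted into new_dx_leaves[2], which is why orig_dx_leaves[i] and
 * new_dx_leaves[i] can be walked in lock-step below.
 */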
3627static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3628				       handle_t *handle,
3629				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3630				       struct buffer_head **orig_dx_leaves,
3631				       struct buffer_head **new_dx_leaves,
3632				       int num_dx_leaves)
3633{
3634	int i, j, num_used;
3635	u32 major_hash;
3636	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3637	struct ocfs2_dx_entry_list *orig_list, *tmp_list;
3638	struct ocfs2_dx_entry *dx_entry;
3639
3640	tmp_list = &tmp_dx_leaf->dl_list;
3641
3642	for (i = 0; i < num_dx_leaves; i++) {
3643		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3644		orig_list = &orig_dx_leaf->dl_list;
3645		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
3646
3647		num_used = le16_to_cpu(orig_list->de_num_used);
3648
3649		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3650		tmp_list->de_num_used = cpu_to_le16(0);
3651		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3652
3653		for (j = 0; j < num_used; j++) {
3654			dx_entry = &orig_list->de_entries[j];
3655			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3656			if (major_hash >= split_hash)
3657				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3658							      dx_entry);
3659			else
3660				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3661							      dx_entry);
3662		}
3663		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3664
3665		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3666		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3667	}
3668}
3669
3670static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3671					  struct ocfs2_dx_root_block *dx_root)
3672{
3673	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3674
3675	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3676	credits += ocfs2_quota_trans_credits(osb->sb);
3677	return credits;
3678}
3679
3680/*
3681 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3682 * half our entries into.
3683 */
3684static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3685				  struct buffer_head *dx_root_bh,
3686				  struct buffer_head *dx_leaf_bh,
3687				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3688				  u64 leaf_blkno)
3689{
3690	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3691	int credits, ret, i, num_used, did_quota = 0;
3692	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3693	u64 orig_leaves_start;
3694	int num_dx_leaves;
3695	struct buffer_head **orig_dx_leaves = NULL;
3696	struct buffer_head **new_dx_leaves = NULL;
3697	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3698	struct ocfs2_extent_tree et;
3699	handle_t *handle = NULL;
3700	struct ocfs2_dx_root_block *dx_root;
3701	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3702
3703	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3704				     (unsigned long long)leaf_blkno,
3705				     insert_hash);
3706
3707	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3708
3709	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3710	/*
3711	 * XXX: This is a rather large limit. We should use a more
3712	 * realistic value.
3713	 */
3714	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3715		return -ENOSPC;
3716
3717	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3718	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3719		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance non-full leaf: "
3720		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3721		     (unsigned long long)leaf_blkno, num_used);
3722		ret = -EIO;
3723		goto out;
3724	}
3725
3726	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3727	if (!orig_dx_leaves) {
3728		ret = -ENOMEM;
3729		mlog_errno(ret);
3730		goto out;
3731	}
3732
3733	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3734	if (!new_dx_leaves) {
3735		ret = -ENOMEM;
3736		mlog_errno(ret);
3737		goto out;
3738	}
3739
3740	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3741	if (ret) {
3742		if (ret != -ENOSPC)
3743			mlog_errno(ret);
3744		goto out;
3745	}
3746
3747	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3748	handle = ocfs2_start_trans(osb, credits);
3749	if (IS_ERR(handle)) {
3750		ret = PTR_ERR(handle);
3751		handle = NULL;
3752		mlog_errno(ret);
3753		goto out;
3754	}
3755
3756	ret = dquot_alloc_space_nodirty(dir,
3757				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3758	if (ret)
3759		goto out_commit;
3760	did_quota = 1;
3761
3762	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3763				      OCFS2_JOURNAL_ACCESS_WRITE);
3764	if (ret) {
3765		mlog_errno(ret);
3766		goto out_commit;
3767	}
3768
3769	/*
3770	 * This block is changing anyway, so we can sort it in place.
3771	 */
3772	sort(dx_leaf->dl_list.de_entries, num_used,
3773	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3774	     dx_leaf_sort_swap);
3775
3776	ocfs2_journal_dirty(handle, dx_leaf_bh);
3777
3778	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3779					   &split_hash);
3780	if (ret) {
3781		mlog_errno(ret);
3782		goto  out_commit;
3783	}
3784
3785	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3786
3787	/*
3788	 * We have to carefully order operations here. There are items
3789	 * which want to be in the new cluster before insert, but in
3790	 * order to put those items in the new cluster, we alter the
3791	 * old cluster. A failure to insert gets nasty.
3792	 *
3793	 * So, start by reserving writes to the old
3794	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3795	 * the new cluster for us, before inserting it. The insert
3796	 * won't happen if there's an error before that. Once the
3797	 * insert is done, we can transfer from one leaf into the
3798	 * other without fear of hitting any error.
3799	 */
3800
3801	/*
3802	 * The leaf transfer wants some scratch space so that we don't
3803	 * wind up doing a bunch of expensive memmove().
3804	 */
3805	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3806	if (!tmp_dx_leaf) {
3807		ret = -ENOMEM;
3808		mlog_errno(ret);
3809		goto out_commit;
3810	}
3811
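	/*
	 * The split works on a whole cluster: read every leaf block in
	 * the cluster containing the overfull leaf, since any of them
	 * may hand entries over to the newly allocated cluster.
	 */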
3812	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3813	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3814				   orig_dx_leaves);
3815	if (ret) {
3816		mlog_errno(ret);
3817		goto out_commit;
3818	}
3819
3820	cpos = split_hash;
3821	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3822				       data_ac, meta_ac, new_dx_leaves,
3823				       num_dx_leaves);
3824	if (ret) {
3825		mlog_errno(ret);
3826		goto out_commit;
3827	}
3828
3829	for (i = 0; i < num_dx_leaves; i++) {
3830		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3831					      orig_dx_leaves[i],
3832					      OCFS2_JOURNAL_ACCESS_WRITE);
3833		if (ret) {
3834			mlog_errno(ret);
3835			goto out_commit;
3836		}
3837
3838		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3839					      new_dx_leaves[i],
3840					      OCFS2_JOURNAL_ACCESS_WRITE);
3841		if (ret) {
3842			mlog_errno(ret);
3843			goto out_commit;
3844		}
3845	}
3846
3847	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3848				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3849
3850out_commit:
3851	if (ret < 0 && did_quota)
3852		dquot_free_space_nodirty(dir,
3853				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3854
3855	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3856	ocfs2_commit_trans(osb, handle);
3857
3858out:
3859	if (orig_dx_leaves || new_dx_leaves) {
3860		for (i = 0; i < num_dx_leaves; i++) {
3861			if (orig_dx_leaves)
3862				brelse(orig_dx_leaves[i]);
3863			if (new_dx_leaves)
3864				brelse(new_dx_leaves[i]);
3865		}
3866		kfree(orig_dx_leaves);
3867		kfree(new_dx_leaves);
3868	}
3869
3870	if (meta_ac)
3871		ocfs2_free_alloc_context(meta_ac);
3872	if (data_ac)
3873		ocfs2_free_alloc_context(data_ac);
3874
3875	kfree(tmp_dx_leaf);
3876	return ret;
3877}
3878
3879static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3880				   struct buffer_head *di_bh,
3881				   struct buffer_head *dx_root_bh,
3882				   const char *name, int namelen,
3883				   struct ocfs2_dir_lookup_result *lookup)
3884{
3885	int ret, rebalanced = 0;
3886	struct ocfs2_dx_root_block *dx_root;
3887	struct buffer_head *dx_leaf_bh = NULL;
3888	struct ocfs2_dx_leaf *dx_leaf;
3889	u64 blkno;
3890	u32 leaf_cpos;
3891
3892	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3893
3894restart_search:
3895	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3896				  &leaf_cpos, &blkno);
3897	if (ret) {
3898		mlog_errno(ret);
3899		goto out;
3900	}
3901
3902	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3903	if (ret) {
3904		mlog_errno(ret);
3905		goto out;
3906	}
3907
3908	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3909
3910	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3911	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3912		if (rebalanced) {
3913			/*
3914			 * Rebalancing should have provided us with
3915			 * space in an appropriate leaf.
3916			 *
3917			 * XXX: Is this an abnormal condition then?
3918			 * Should we print a message here?
3919			 */
3920			ret = -ENOSPC;
3921			goto out;
3922		}
3923
3924		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3925					     &lookup->dl_hinfo, leaf_cpos,
3926					     blkno);
3927		if (ret) {
3928			if (ret != -ENOSPC)
3929				mlog_errno(ret);
3930			goto out;
3931		}
3932
3933		/*
3934		 * Restart the lookup. The rebalance might have
3935		 * changed which block our item fits into. Mark our
3936		 * progress, so we only execute this once.
3937		 */
3938		brelse(dx_leaf_bh);
3939		dx_leaf_bh = NULL;
3940		rebalanced = 1;
3941		goto restart_search;
3942	}
3943
3944	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3945	dx_leaf_bh = NULL;
3946
3947out:
3948	brelse(dx_leaf_bh);
3949	return ret;
3950}
3951
3952static int ocfs2_search_dx_free_list(struct inode *dir,
3953				     struct buffer_head *dx_root_bh,
3954				     int namelen,
3955				     struct ocfs2_dir_lookup_result *lookup)
3956{
3957	int ret = -ENOSPC;
3958	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3959	struct ocfs2_dir_block_trailer *db;
3960	u64 next_block;
3961	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3962	struct ocfs2_dx_root_block *dx_root;
3963
3964	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3965	next_block = le64_to_cpu(dx_root->dr_free_blk);
3966
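	/*
	 * Walk the singly linked list of partially empty dirent blocks
	 * (threaded through each block's trailer) until one advertises
	 * enough free record space for this name. The previous block
	 * is remembered as well, presumably so the caller can unlink
	 * the chosen block from the list if it becomes full.
	 */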
3967	while (next_block) {
3968		brelse(prev_leaf_bh);
3969		prev_leaf_bh = leaf_bh;
3970		leaf_bh = NULL;
3971
3972		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
3973		if (ret) {
3974			mlog_errno(ret);
3975			goto out;
3976		}
3977
3978		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
3979		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
3980			lookup->dl_leaf_bh = leaf_bh;
3981			lookup->dl_prev_leaf_bh = prev_leaf_bh;
3982			leaf_bh = NULL;
3983			prev_leaf_bh = NULL;
3984			break;
3985		}
3986
3987		next_block = le64_to_cpu(db->db_free_next);
3988	}
3989
3990	if (!next_block)
3991		ret = -ENOSPC;
3992
3993out:
3994
3995	brelse(leaf_bh);
3996	brelse(prev_leaf_bh);
3997	return ret;
3998}
3999
4000static int ocfs2_expand_inline_dx_root(struct inode *dir,
4001				       struct buffer_head *dx_root_bh)
4002{
4003	int ret, num_dx_leaves, i, j, did_quota = 0;
4004	struct buffer_head **dx_leaves = NULL;
4005	struct ocfs2_extent_tree et;
4006	u64 insert_blkno;
4007	struct ocfs2_alloc_context *data_ac = NULL;
4008	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4009	handle_t *handle = NULL;
4010	struct ocfs2_dx_root_block *dx_root;
4011	struct ocfs2_dx_entry_list *entry_list;
4012	struct ocfs2_dx_entry *dx_entry;
4013	struct ocfs2_dx_leaf *target_leaf;
4014
4015	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4016	if (ret) {
4017		mlog_errno(ret);
4018		goto out;
4019	}
4020
4021	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4022	if (!dx_leaves) {
4023		ret = -ENOMEM;
4024		mlog_errno(ret);
4025		goto out;
4026	}
4027
4028	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4029	if (IS_ERR(handle)) {
4030		ret = PTR_ERR(handle);
4031		mlog_errno(ret);
4032		goto out;
4033	}
4034
4035	ret = dquot_alloc_space_nodirty(dir,
4036				       ocfs2_clusters_to_bytes(osb->sb, 1));
4037	if (ret)
4038		goto out_commit;
4039	did_quota = 1;
4040
4041	/*
4042	 * We do this up front, before the allocation, so that a
4043	 * failure to add the dx_root_bh to the journal won't result
4044	 * in us losing clusters.
4045	 */
4046	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4047				      OCFS2_JOURNAL_ACCESS_WRITE);
4048	if (ret) {
4049		mlog_errno(ret);
4050		goto out_commit;
4051	}
4052
4053	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4054					 num_dx_leaves, &insert_blkno);
4055	if (ret) {
4056		mlog_errno(ret);
4057		goto out_commit;
4058	}
4059
4060	/*
4061	 * Transfer the entries from our dx_root into the appropriate
4062	 * leaf blocks.
4063	 */
4064	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4065	entry_list = &dx_root->dr_entries;
4066
4067	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4068		dx_entry = &entry_list->de_entries[i];
4069
4070		j = __ocfs2_dx_dir_hash_idx(osb,
4071					    le32_to_cpu(dx_entry->dx_minor_hash));
4072		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4073
4074		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4075
4076		/* Each leaf has been passed to the journal already
4077		 * via __ocfs2_dx_dir_new_cluster() */
4078	}
4079
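	/*
	 * Convert the root from an inline entry list to an extent
	 * list: clear the inline flag and reinitialize everything from
	 * dr_list onward so this block can root the new extent tree.
	 */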
4080	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4081	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4082	       offsetof(struct ocfs2_dx_root_block, dr_list));
4083	dx_root->dr_list.l_count =
4084		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4085
4086	/* This should never fail considering we start with an empty
4087	 * dx_root. */
4088	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4089	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4090	if (ret)
4091		mlog_errno(ret);
4092	did_quota = 0;
4093
4094	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4095	ocfs2_journal_dirty(handle, dx_root_bh);
4096
4097out_commit:
4098	if (ret < 0 && did_quota)
4099		dquot_free_space_nodirty(dir,
4100					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4101
4102	ocfs2_commit_trans(osb, handle);
4103
4104out:
4105	if (data_ac)
4106		ocfs2_free_alloc_context(data_ac);
4107
4108	if (dx_leaves) {
4109		for (i = 0; i < num_dx_leaves; i++)
4110			brelse(dx_leaves[i]);
4111		kfree(dx_leaves);
4112	}
4113	return ret;
4114}
4115
4116static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4117{
4118	struct ocfs2_dx_root_block *dx_root;
4119	struct ocfs2_dx_entry_list *entry_list;
4120
4121	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4122	entry_list = &dx_root->dr_entries;
4123
4124	if (le16_to_cpu(entry_list->de_num_used) >=
4125	    le16_to_cpu(entry_list->de_count))
4126		return -ENOSPC;
4127
4128	return 0;
4129}
4130
4131static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4132					   struct buffer_head *di_bh,
4133					   const char *name,
4134					   int namelen,
4135					   struct ocfs2_dir_lookup_result *lookup)
4136{
4137	int ret, free_dx_root = 1;
4138	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4139	struct buffer_head *dx_root_bh = NULL;
4140	struct buffer_head *leaf_bh = NULL;
4141	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4142	struct ocfs2_dx_root_block *dx_root;
4143
4144	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4145	if (ret) {
4146		mlog_errno(ret);
4147		goto out;
4148	}
4149
4150	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4151	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4152		ret = -ENOSPC;
4153		mlog_errno(ret);
4154		goto out;
4155	}
4156
4157	if (ocfs2_dx_root_inline(dx_root)) {
4158		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4159
4160		if (ret == 0)
4161			goto search_el;
4162
4163		/*
4164		 * We ran out of room in the root block. Expand it to
4165		 * an extent, then allow ocfs2_find_dir_space_dx to do
4166		 * the rest.
4167		 */
4168		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4169		if (ret) {
4170			mlog_errno(ret);
4171			goto out;
4172		}
4173	}
4174
4175	/*
4176	 * Insert preparation for an indexed directory is split into two
4177	 * steps. The call to find_dir_space_dx reserves room in the index for
4178	 * an additional item. If we run out of space there, it's a real error
4179	 * and we can't continue.
4180	 */
4181	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4182				      namelen, lookup);
4183	if (ret) {
4184		mlog_errno(ret);
4185		goto out;
4186	}
4187
4188search_el:
4189	/*
4190	 * Next, we need to find space in the unindexed tree. This call
4191	 * searches using the free space linked list. If the unindexed tree
4192	 * lacks sufficient space, we'll expand it below. The expansion code
4193	 * is smart enough to add any new blocks to the free space list.
4194	 */
4195	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4196	if (ret && ret != -ENOSPC) {
4197		mlog_errno(ret);
4198		goto out;
4199	}
4200
4201	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4202	lookup->dl_dx_root_bh = dx_root_bh;
4203	free_dx_root = 0;
4204
4205	if (ret == -ENOSPC) {
4206		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4207
4208		if (ret) {
4209			mlog_errno(ret);
4210			goto out;
4211		}
4212
4213		/*
4214		 * We make the assumption here that new leaf blocks are added
4215		 * to the front of our free list.
4216		 */
4217		lookup->dl_prev_leaf_bh = NULL;
4218		lookup->dl_leaf_bh = leaf_bh;
4219	}
4220
4221out:
4222	if (free_dx_root)
4223		brelse(dx_root_bh);
4224	return ret;
4225}
4226
4227/*
4228 * Get a directory ready for insert. Any directory allocation required
4229 * happens here. Success returns zero, and enough context in the dir
4230 * lookup result that ocfs2_add_entry() will be able to complete the task
4231 * with minimal performance impact.
4232 */
4233int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4234				 struct inode *dir,
4235				 struct buffer_head *parent_fe_bh,
4236				 const char *name,
4237				 int namelen,
4238				 struct ocfs2_dir_lookup_result *lookup)
4239{
4240	int ret;
4241	unsigned int blocks_wanted = 1;
4242	struct buffer_head *bh = NULL;
4243
4244	trace_ocfs2_prepare_dir_for_insert(
4245		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4246
4247	if (!namelen) {
4248		ret = -EINVAL;
4249		mlog_errno(ret);
4250		goto out;
4251	}
4252
4253	/*
4254	 * Do this up front to reduce confusion.
4255	 *
4256	 * The directory might start inline, then be turned into an
4257	 * indexed one, in which case we'd need to hash deep inside
4258	 * ocfs2_find_dir_space_id(). Since
4259	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4260	 * done, there seems no point in spreading out the calls. We
4261	 * can optimize away the case where the file system doesn't
4262	 * support indexing.
4263	 */
4264	if (ocfs2_supports_indexed_dirs(osb))
4265		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4266
4267	if (ocfs2_dir_indexed(dir)) {
4268		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4269						      name, namelen, lookup);
4270		if (ret)
4271			mlog_errno(ret);
4272		goto out;
4273	}
4274
4275	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4276		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4277					      namelen, &bh, &blocks_wanted);
4278	} else
4279		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4280
4281	if (ret && ret != -ENOSPC) {
4282		mlog_errno(ret);
4283		goto out;
4284	}
4285
4286	if (ret == -ENOSPC) {
4287		/*
4288		 * We have to expand the directory to add this name.
4289		 */
4290		BUG_ON(bh);
4291
4292		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4293				       lookup, &bh);
4294		if (ret) {
4295			if (ret != -ENOSPC)
4296				mlog_errno(ret);
4297			goto out;
4298		}
4299
4300		BUG_ON(!bh);
4301	}
4302
4303	lookup->dl_leaf_bh = bh;
4304	bh = NULL;
4305out:
4306	brelse(bh);
4307	return ret;
4308}
4309
4310static int ocfs2_dx_dir_remove_index(struct inode *dir,
4311				     struct buffer_head *di_bh,
4312				     struct buffer_head *dx_root_bh)
4313{
4314	int ret;
4315	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4316	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4317	struct ocfs2_dx_root_block *dx_root;
4318	struct inode *dx_alloc_inode = NULL;
4319	struct buffer_head *dx_alloc_bh = NULL;
4320	handle_t *handle;
4321	u64 blk;
4322	u16 bit;
4323	u64 bg_blkno;
4324
4325	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4326
4327	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4328					EXTENT_ALLOC_SYSTEM_INODE,
4329					le16_to_cpu(dx_root->dr_suballoc_slot));
4330	if (!dx_alloc_inode) {
4331		ret = -ENOMEM;
4332		mlog_errno(ret);
4333		goto out;
4334	}
4335	inode_lock(dx_alloc_inode);
4336
4337	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4338	if (ret) {
4339		mlog_errno(ret);
4340		goto out_mutex;
4341	}
4342
4343	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4344	if (IS_ERR(handle)) {
4345		ret = PTR_ERR(handle);
4346		mlog_errno(ret);
4347		goto out_unlock;
4348	}
4349
4350	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4351				      OCFS2_JOURNAL_ACCESS_WRITE);
4352	if (ret) {
4353		mlog_errno(ret);
4354		goto out_commit;
4355	}
4356
4357	spin_lock(&OCFS2_I(dir)->ip_lock);
4358	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4359	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4360	spin_unlock(&OCFS2_I(dir)->ip_lock);
4361	di->i_dx_root = cpu_to_le64(0ULL);
4362	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4363
4364	ocfs2_journal_dirty(handle, di_bh);
4365
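	/*
	 * The dx_root block itself came from an extent allocator
	 * group; locate that group either from the recorded suballoc
	 * location or from the block/bit pair, then give the bit back.
	 */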
4366	blk = le64_to_cpu(dx_root->dr_blkno);
4367	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4368	if (dx_root->dr_suballoc_loc)
4369		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4370	else
4371		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4372	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4373				       bit, bg_blkno, 1);
4374	if (ret)
4375		mlog_errno(ret);
4376
4377out_commit:
4378	ocfs2_commit_trans(osb, handle);
4379
4380out_unlock:
4381	ocfs2_inode_unlock(dx_alloc_inode, 1);
4382
4383out_mutex:
4384	inode_unlock(dx_alloc_inode);
4385	brelse(dx_alloc_bh);
4386out:
4387	iput(dx_alloc_inode);
4388	return ret;
4389}
4390
4391int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4392{
4393	int ret;
4394	unsigned int clen;
4395	u32 major_hash = UINT_MAX, p_cpos, cpos;
4396	u64 blkno;
4397	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4398	struct buffer_head *dx_root_bh = NULL;
4399	struct ocfs2_dx_root_block *dx_root;
4400	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4401	struct ocfs2_cached_dealloc_ctxt dealloc;
4402	struct ocfs2_extent_tree et;
4403
4404	ocfs2_init_dealloc_ctxt(&dealloc);
4405
4406	if (!ocfs2_dir_indexed(dir))
4407		return 0;
4408
4409	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4410	if (ret) {
4411		mlog_errno(ret);
4412		goto out;
4413	}
4414	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4415
4416	if (ocfs2_dx_root_inline(dx_root))
4417		goto remove_index;
4418
4419	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4420
4421	/* XXX: What if dr_clusters is too large? */
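	/*
	 * Tear the index down from the top: the first lookup starts at
	 * UINT_MAX to find the right-most extent, and after each
	 * removal the search continues from one below that extent's
	 * cpos until the extent at cpos 0 is gone.
	 */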
4422	while (le32_to_cpu(dx_root->dr_clusters)) {
4423		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4424					      major_hash, &cpos, &blkno, &clen);
4425		if (ret) {
4426			mlog_errno(ret);
4427			goto out;
4428		}
4429
4430		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4431
4432		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4433					       &dealloc, 0, false);
4434		if (ret) {
4435			mlog_errno(ret);
4436			goto out;
4437		}
4438
4439		if (cpos == 0)
4440			break;
4441
4442		major_hash = cpos - 1;
4443	}
4444
4445remove_index:
4446	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4447	if (ret) {
4448		mlog_errno(ret);
4449		goto out;
4450	}
4451
4452	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4453out:
4454	ocfs2_schedule_truncate_log_flush(osb, 1);
4455	ocfs2_run_deallocs(osb, &dealloc);
4456
4457	brelse(dx_root_bh);
4458	return ret;
4459}