Linux Audio

Check our new training course

Loading...
v4.10.11
   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * dir.c
   5 *
   6 * Creates, reads, walks and deletes directory-nodes
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 *  Portions of this code from linux/fs/ext3/dir.c
  11 *
  12 *  Copyright (C) 1992, 1993, 1994, 1995
  13 *  Remy Card (card@masi.ibp.fr)
  14 *  Laboratoire MASI - Institut Blaise pascal
  15 *  Universite Pierre et Marie Curie (Paris VI)
  16 *
  17 *   from
  18 *
  19 *   linux/fs/minix/dir.c
  20 *
  21 *   Copyright (C) 1991, 1992 Linus Torvalds
  22 *
  23 * This program is free software; you can redistribute it and/or
  24 * modify it under the terms of the GNU General Public
  25 * License as published by the Free Software Foundation; either
  26 * version 2 of the License, or (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  31 * General Public License for more details.
  32 *
  33 * You should have received a copy of the GNU General Public
  34 * License along with this program; if not, write to the
  35 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  36 * Boston, MA 021110-1307, USA.
  37 */
  38
  39#include <linux/fs.h>
  40#include <linux/types.h>
  41#include <linux/slab.h>
  42#include <linux/highmem.h>
  43#include <linux/quotaops.h>
  44#include <linux/sort.h>
 
  45
  46#include <cluster/masklog.h>
  47
  48#include "ocfs2.h"
  49
  50#include "alloc.h"
  51#include "blockcheck.h"
  52#include "dir.h"
  53#include "dlmglue.h"
  54#include "extent_map.h"
  55#include "file.h"
  56#include "inode.h"
  57#include "journal.h"
  58#include "namei.h"
  59#include "suballoc.h"
  60#include "super.h"
  61#include "sysfile.h"
  62#include "uptodate.h"
  63#include "ocfs2_trace.h"
  64
  65#include "buffer_head_io.h"
  66
  67#define NAMEI_RA_CHUNKS  2
  68#define NAMEI_RA_BLOCKS  4
  69#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  70
  71static unsigned char ocfs2_filetype_table[] = {
  72	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
  73};
  74
  75static int ocfs2_do_extend_dir(struct super_block *sb,
  76			       handle_t *handle,
  77			       struct inode *dir,
  78			       struct buffer_head *parent_fe_bh,
  79			       struct ocfs2_alloc_context *data_ac,
  80			       struct ocfs2_alloc_context *meta_ac,
  81			       struct buffer_head **new_bh);
  82static int ocfs2_dir_indexed(struct inode *inode);
  83
  84/*
  85 * These are distinct checks because future versions of the file system will
  86 * want to have a trailing dirent structure independent of indexing.
  87 */
  88static int ocfs2_supports_dir_trailer(struct inode *dir)
  89{
  90	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  91
  92	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  93		return 0;
  94
  95	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  96}
  97
  98/*
  99 * "new' here refers to the point at which we're creating a new
 100 * directory via "mkdir()", but also when we're expanding an inline
 101 * directory. In either case, we don't yet have the indexing bit set
 102 * on the directory, so the standard checks will fail in when metaecc
 103 * is turned off. Only directory-initialization type functions should
 104 * use this then. Everything else wants ocfs2_supports_dir_trailer()
 105 */
 106static int ocfs2_new_dir_wants_trailer(struct inode *dir)
 107{
 108	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 109
 110	return ocfs2_meta_ecc(osb) ||
 111		ocfs2_supports_indexed_dirs(osb);
 112}
 113
 114static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
 115{
 116	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
 117}
 118
 119#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 120
 121/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 122 * them more consistent? */
 123struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 124							    void *data)
 125{
 126	char *p = data;
 127
 128	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 129	return (struct ocfs2_dir_block_trailer *)p;
 130}
 131
 132/*
 133 * XXX: This is executed once on every dirent. We should consider optimizing
 134 * it.
 135 */
 136static int ocfs2_skip_dir_trailer(struct inode *dir,
 137				  struct ocfs2_dir_entry *de,
 138				  unsigned long offset,
 139				  unsigned long blklen)
 140{
 141	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 142
 143	if (!ocfs2_supports_dir_trailer(dir))
 144		return 0;
 145
 146	if (offset != toff)
 147		return 0;
 148
 149	return 1;
 150}
 151
 152static void ocfs2_init_dir_trailer(struct inode *inode,
 153				   struct buffer_head *bh, u16 rec_len)
 154{
 155	struct ocfs2_dir_block_trailer *trailer;
 156
 157	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 158	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 159	trailer->db_compat_rec_len =
 160			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 161	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 162	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 163	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 164}
 165/*
 166 * Link an unindexed block with a dir trailer structure into the index free
 167 * list. This function will modify dirdata_bh, but assumes you've already
 168 * passed it to the journal.
 169 */
 170static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 171				     struct buffer_head *dx_root_bh,
 172				     struct buffer_head *dirdata_bh)
 173{
 174	int ret;
 175	struct ocfs2_dx_root_block *dx_root;
 176	struct ocfs2_dir_block_trailer *trailer;
 177
 178	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 179				      OCFS2_JOURNAL_ACCESS_WRITE);
 180	if (ret) {
 181		mlog_errno(ret);
 182		goto out;
 183	}
 184	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 185	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 186
 187	trailer->db_free_next = dx_root->dr_free_blk;
 188	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 189
 190	ocfs2_journal_dirty(handle, dx_root_bh);
 191
 192out:
 193	return ret;
 194}
 195
 196static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 197{
 198	return res->dl_prev_leaf_bh == NULL;
 199}
 200
 201void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 202{
 203	brelse(res->dl_dx_root_bh);
 204	brelse(res->dl_leaf_bh);
 205	brelse(res->dl_dx_leaf_bh);
 206	brelse(res->dl_prev_leaf_bh);
 207}
 208
 209static int ocfs2_dir_indexed(struct inode *inode)
 210{
 211	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 212		return 1;
 213	return 0;
 214}
 215
 216static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 217{
 218	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 219}
 220
 221/*
 222 * Hashing code adapted from ext3
 223 */
 224#define DELTA 0x9E3779B9
 225
 226static void TEA_transform(__u32 buf[4], __u32 const in[])
 227{
 228	__u32	sum = 0;
 229	__u32	b0 = buf[0], b1 = buf[1];
 230	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 231	int	n = 16;
 232
 233	do {
 234		sum += DELTA;
 235		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 236		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 237	} while (--n);
 238
 239	buf[0] += b0;
 240	buf[1] += b1;
 241}
 242
 243static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 244{
 245	__u32	pad, val;
 246	int	i;
 247
 248	pad = (__u32)len | ((__u32)len << 8);
 249	pad |= pad << 16;
 250
 251	val = pad;
 252	if (len > num*4)
 253		len = num * 4;
 254	for (i = 0; i < len; i++) {
 255		if ((i % 4) == 0)
 256			val = pad;
 257		val = msg[i] + (val << 8);
 258		if ((i % 4) == 3) {
 259			*buf++ = val;
 260			val = pad;
 261			num--;
 262		}
 263	}
 264	if (--num >= 0)
 265		*buf++ = val;
 266	while (--num >= 0)
 267		*buf++ = pad;
 268}
 269
 270static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 271				   struct ocfs2_dx_hinfo *hinfo)
 272{
 273	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 274	const char	*p;
 275	__u32		in[8], buf[4];
 276
 277	/*
 278	 * XXX: Is this really necessary, if the index is never looked
 279	 * at by readdir? Is a hash value of '0' a bad idea?
 280	 */
 281	if ((len == 1 && !strncmp(".", name, 1)) ||
 282	    (len == 2 && !strncmp("..", name, 2))) {
 283		buf[0] = buf[1] = 0;
 284		goto out;
 285	}
 286
 287#ifdef OCFS2_DEBUG_DX_DIRS
 288	/*
 289	 * This makes it very easy to debug indexing problems. We
 290	 * should never allow this to be selected without hand editing
 291	 * this file though.
 292	 */
 293	buf[0] = buf[1] = len;
 294	goto out;
 295#endif
 296
 297	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 298
 299	p = name;
 300	while (len > 0) {
 301		str2hashbuf(p, len, in, 4);
 302		TEA_transform(buf, in);
 303		len -= 16;
 304		p += 16;
 305	}
 306
 307out:
 308	hinfo->major_hash = buf[0];
 309	hinfo->minor_hash = buf[1];
 310}
 311
 312/*
 313 * bh passed here can be an inode block or a dir data block, depending
 314 * on the inode inline data flag.
 315 */
 316static int ocfs2_check_dir_entry(struct inode * dir,
 317				 struct ocfs2_dir_entry * de,
 318				 struct buffer_head * bh,
 
 
 319				 unsigned long offset)
 320{
 321	const char *error_msg = NULL;
 322	const int rlen = le16_to_cpu(de->rec_len);
 
 323
 324	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 325		error_msg = "rec_len is smaller than minimal";
 326	else if (unlikely(rlen % 4 != 0))
 327		error_msg = "rec_len % 4 != 0";
 328	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 329		error_msg = "rec_len is too small for name_len";
 330	else if (unlikely(
 331		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
 332		error_msg = "directory entry across blocks";
 
 
 333
 334	if (unlikely(error_msg != NULL))
 335		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 336		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 337		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 338		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 339		     de->name_len);
 340
 341	return error_msg == NULL ? 1 : 0;
 342}
 343
 344static inline int ocfs2_match(int len,
 345			      const char * const name,
 346			      struct ocfs2_dir_entry *de)
 347{
 348	if (len != de->name_len)
 349		return 0;
 350	if (!de->inode)
 351		return 0;
 352	return !memcmp(name, de->name, len);
 353}
 354
 355/*
 356 * Returns 0 if not found, -1 on failure, and 1 on success
 357 */
 358static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 359					struct inode *dir,
 360					const char *name, int namelen,
 361					unsigned long offset,
 362					char *first_de,
 363					unsigned int bytes,
 364					struct ocfs2_dir_entry **res_dir)
 365{
 366	struct ocfs2_dir_entry *de;
 367	char *dlimit, *de_buf;
 368	int de_len;
 369	int ret = 0;
 370
 371	de_buf = first_de;
 372	dlimit = de_buf + bytes;
 373
 374	while (de_buf < dlimit) {
 375		/* this code is executed quadratically often */
 376		/* do minimal checking `by hand' */
 377
 378		de = (struct ocfs2_dir_entry *) de_buf;
 379
 380		if (de_buf + namelen <= dlimit &&
 381		    ocfs2_match(namelen, name, de)) {
 382			/* found a match - just to be sure, do a full check */
 383			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 
 384				ret = -1;
 385				goto bail;
 386			}
 387			*res_dir = de;
 388			ret = 1;
 389			goto bail;
 390		}
 391
 392		/* prevent looping on a bad block */
 393		de_len = le16_to_cpu(de->rec_len);
 394		if (de_len <= 0) {
 395			ret = -1;
 396			goto bail;
 397		}
 398
 399		de_buf += de_len;
 400		offset += de_len;
 401	}
 402
 403bail:
 404	trace_ocfs2_search_dirblock(ret);
 405	return ret;
 406}
 407
 408static struct buffer_head *ocfs2_find_entry_id(const char *name,
 409					       int namelen,
 410					       struct inode *dir,
 411					       struct ocfs2_dir_entry **res_dir)
 412{
 413	int ret, found;
 414	struct buffer_head *di_bh = NULL;
 415	struct ocfs2_dinode *di;
 416	struct ocfs2_inline_data *data;
 417
 418	ret = ocfs2_read_inode_block(dir, &di_bh);
 419	if (ret) {
 420		mlog_errno(ret);
 421		goto out;
 422	}
 423
 424	di = (struct ocfs2_dinode *)di_bh->b_data;
 425	data = &di->id2.i_data;
 426
 427	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 428				      data->id_data, i_size_read(dir), res_dir);
 429	if (found == 1)
 430		return di_bh;
 431
 432	brelse(di_bh);
 433out:
 434	return NULL;
 435}
 436
 437static int ocfs2_validate_dir_block(struct super_block *sb,
 438				    struct buffer_head *bh)
 439{
 440	int rc;
 441	struct ocfs2_dir_block_trailer *trailer =
 442		ocfs2_trailer_from_bh(bh, sb);
 443
 444
 445	/*
 446	 * We don't validate dirents here, that's handled
 447	 * in-place when the code walks them.
 448	 */
 449	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 450
 451	BUG_ON(!buffer_uptodate(bh));
 452
 453	/*
 454	 * If the ecc fails, we return the error but otherwise
 455	 * leave the filesystem running.  We know any error is
 456	 * local to this block.
 457	 *
 458	 * Note that we are safe to call this even if the directory
 459	 * doesn't have a trailer.  Filesystems without metaecc will do
 460	 * nothing, and filesystems with it will have one.
 461	 */
 462	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 463	if (rc)
 464		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
 465		     (unsigned long long)bh->b_blocknr);
 466
 467	return rc;
 468}
 469
 470/*
 471 * Validate a directory trailer.
 472 *
 473 * We check the trailer here rather than in ocfs2_validate_dir_block()
 474 * because that function doesn't have the inode to test.
 475 */
 476static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 477{
 478	int rc = 0;
 479	struct ocfs2_dir_block_trailer *trailer;
 480
 481	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 482	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 483		rc = ocfs2_error(dir->i_sb,
 484				 "Invalid dirblock #%llu: signature = %.*s\n",
 485				 (unsigned long long)bh->b_blocknr, 7,
 486				 trailer->db_signature);
 487		goto out;
 488	}
 489	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 490		rc = ocfs2_error(dir->i_sb,
 491				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 492				 (unsigned long long)bh->b_blocknr,
 493				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 494		goto out;
 495	}
 496	if (le64_to_cpu(trailer->db_parent_dinode) !=
 497	    OCFS2_I(dir)->ip_blkno) {
 498		rc = ocfs2_error(dir->i_sb,
 499				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 500				 (unsigned long long)bh->b_blocknr,
 501				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
 502				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 503		goto out;
 504	}
 505out:
 506	return rc;
 507}
 508
 509/*
 510 * This function forces all errors to -EIO for consistency with its
 511 * predecessor, ocfs2_bread().  We haven't audited what returning the
 512 * real error codes would do to callers.  We log the real codes with
 513 * mlog_errno() before we squash them.
 514 */
 515static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 516				struct buffer_head **bh, int flags)
 517{
 518	int rc = 0;
 519	struct buffer_head *tmp = *bh;
 520
 521	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 522				    ocfs2_validate_dir_block);
 523	if (rc) {
 524		mlog_errno(rc);
 525		goto out;
 526	}
 527
 528	if (!(flags & OCFS2_BH_READAHEAD) &&
 529	    ocfs2_supports_dir_trailer(inode)) {
 530		rc = ocfs2_check_dir_trailer(inode, tmp);
 531		if (rc) {
 532			if (!*bh)
 533				brelse(tmp);
 534			mlog_errno(rc);
 535			goto out;
 536		}
 537	}
 538
 539	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 540	if (!*bh)
 541		*bh = tmp;
 542
 543out:
 544	return rc ? -EIO : 0;
 545}
 546
 547/*
 548 * Read the block at 'phys' which belongs to this directory
 549 * inode. This function does no virtual->physical block translation -
 550 * what's passed in is assumed to be a valid directory block.
 551 */
 552static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 553				       struct buffer_head **bh)
 554{
 555	int ret;
 556	struct buffer_head *tmp = *bh;
 557
 558	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 559			       ocfs2_validate_dir_block);
 560	if (ret) {
 561		mlog_errno(ret);
 562		goto out;
 563	}
 564
 565	if (ocfs2_supports_dir_trailer(dir)) {
 566		ret = ocfs2_check_dir_trailer(dir, tmp);
 567		if (ret) {
 568			if (!*bh)
 569				brelse(tmp);
 570			mlog_errno(ret);
 571			goto out;
 572		}
 573	}
 574
 575	if (!ret && !*bh)
 576		*bh = tmp;
 577out:
 578	return ret;
 579}
 580
 581static int ocfs2_validate_dx_root(struct super_block *sb,
 582				  struct buffer_head *bh)
 583{
 584	int ret;
 585	struct ocfs2_dx_root_block *dx_root;
 586
 587	BUG_ON(!buffer_uptodate(bh));
 588
 589	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 590
 591	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 592	if (ret) {
 593		mlog(ML_ERROR,
 594		     "Checksum failed for dir index root block %llu\n",
 595		     (unsigned long long)bh->b_blocknr);
 596		return ret;
 597	}
 598
 599	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 600		ret = ocfs2_error(sb,
 601				  "Dir Index Root # %llu has bad signature %.*s\n",
 602				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 603				  7, dx_root->dr_signature);
 604	}
 605
 606	return ret;
 607}
 608
 609static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 610			      struct buffer_head **dx_root_bh)
 611{
 612	int ret;
 613	u64 blkno = le64_to_cpu(di->i_dx_root);
 614	struct buffer_head *tmp = *dx_root_bh;
 615
 616	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 617			       ocfs2_validate_dx_root);
 618
 619	/* If ocfs2_read_block() got us a new bh, pass it up. */
 620	if (!ret && !*dx_root_bh)
 621		*dx_root_bh = tmp;
 622
 623	return ret;
 624}
 625
 626static int ocfs2_validate_dx_leaf(struct super_block *sb,
 627				  struct buffer_head *bh)
 628{
 629	int ret;
 630	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 631
 632	BUG_ON(!buffer_uptodate(bh));
 633
 634	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 635	if (ret) {
 636		mlog(ML_ERROR,
 637		     "Checksum failed for dir index leaf block %llu\n",
 638		     (unsigned long long)bh->b_blocknr);
 639		return ret;
 640	}
 641
 642	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 643		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 644				  7, dx_leaf->dl_signature);
 645	}
 646
 647	return ret;
 648}
 649
 650static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 651			      struct buffer_head **dx_leaf_bh)
 652{
 653	int ret;
 654	struct buffer_head *tmp = *dx_leaf_bh;
 655
 656	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 657			       ocfs2_validate_dx_leaf);
 658
 659	/* If ocfs2_read_block() got us a new bh, pass it up. */
 660	if (!ret && !*dx_leaf_bh)
 661		*dx_leaf_bh = tmp;
 662
 663	return ret;
 664}
 665
 666/*
 667 * Read a series of dx_leaf blocks. This expects all buffer_head
 668 * pointers to be NULL on function entry.
 669 */
 670static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 671				struct buffer_head **dx_leaf_bhs)
 672{
 673	int ret;
 674
 675	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 676				ocfs2_validate_dx_leaf);
 677	if (ret)
 678		mlog_errno(ret);
 679
 680	return ret;
 681}
 682
 683static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 684					       struct inode *dir,
 685					       struct ocfs2_dir_entry **res_dir)
 686{
 687	struct super_block *sb;
 688	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 689	struct buffer_head *bh, *ret = NULL;
 690	unsigned long start, block, b;
 691	int ra_max = 0;		/* Number of bh's in the readahead
 692				   buffer, bh_use[] */
 693	int ra_ptr = 0;		/* Current index into readahead
 694				   buffer */
 695	int num = 0;
 696	int nblocks, i, err;
 697
 698	sb = dir->i_sb;
 699
 700	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 701	start = OCFS2_I(dir)->ip_dir_start_lookup;
 702	if (start >= nblocks)
 703		start = 0;
 704	block = start;
 705
 706restart:
 707	do {
 708		/*
 709		 * We deal with the read-ahead logic here.
 710		 */
 711		if (ra_ptr >= ra_max) {
 712			/* Refill the readahead buffer */
 713			ra_ptr = 0;
 714			b = block;
 715			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 716				/*
 717				 * Terminate if we reach the end of the
 718				 * directory and must wrap, or if our
 719				 * search has finished at this block.
 720				 */
 721				if (b >= nblocks || (num && block == start)) {
 722					bh_use[ra_max] = NULL;
 723					break;
 724				}
 725				num++;
 726
 727				bh = NULL;
 728				err = ocfs2_read_dir_block(dir, b++, &bh,
 729							   OCFS2_BH_READAHEAD);
 730				bh_use[ra_max] = bh;
 731			}
 732		}
 733		if ((bh = bh_use[ra_ptr++]) == NULL)
 734			goto next;
 735		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 736			/* read error, skip block & hope for the best.
 737			 * ocfs2_read_dir_block() has released the bh. */
 738			mlog(ML_ERROR, "reading directory %llu, "
 739				    "offset %lu\n",
 740				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 741				    block);
 742			goto next;
 743		}
 744		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 745					  block << sb->s_blocksize_bits,
 746					  bh->b_data, sb->s_blocksize,
 747					  res_dir);
 748		if (i == 1) {
 749			OCFS2_I(dir)->ip_dir_start_lookup = block;
 750			ret = bh;
 751			goto cleanup_and_exit;
 752		} else {
 753			brelse(bh);
 754			if (i < 0)
 755				goto cleanup_and_exit;
 756		}
 757	next:
 758		if (++block >= nblocks)
 759			block = 0;
 760	} while (block != start);
 761
 762	/*
 763	 * If the directory has grown while we were searching, then
 764	 * search the last part of the directory before giving up.
 765	 */
 766	block = nblocks;
 767	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 768	if (block < nblocks) {
 769		start = 0;
 770		goto restart;
 771	}
 772
 773cleanup_and_exit:
 774	/* Clean up the read-ahead blocks */
 775	for (; ra_ptr < ra_max; ra_ptr++)
 776		brelse(bh_use[ra_ptr]);
 777
 778	trace_ocfs2_find_entry_el(ret);
 779	return ret;
 780}
 781
 782static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 783				   struct ocfs2_extent_list *el,
 784				   u32 major_hash,
 785				   u32 *ret_cpos,
 786				   u64 *ret_phys_blkno,
 787				   unsigned int *ret_clen)
 788{
 789	int ret = 0, i, found;
 790	struct buffer_head *eb_bh = NULL;
 791	struct ocfs2_extent_block *eb;
 792	struct ocfs2_extent_rec *rec = NULL;
 793
 794	if (el->l_tree_depth) {
 795		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 796				      &eb_bh);
 797		if (ret) {
 798			mlog_errno(ret);
 799			goto out;
 800		}
 801
 802		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 803		el = &eb->h_list;
 804
 805		if (el->l_tree_depth) {
 806			ret = ocfs2_error(inode->i_sb,
 807					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 808					  inode->i_ino,
 809					  (unsigned long long)eb_bh->b_blocknr);
 810			goto out;
 811		}
 812	}
 813
 814	found = 0;
 815	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 816		rec = &el->l_recs[i];
 817
 818		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 819			found = 1;
 820			break;
 821		}
 822	}
 823
 824	if (!found) {
 825		ret = ocfs2_error(inode->i_sb,
 826				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 827				  inode->i_ino,
 828				  le32_to_cpu(rec->e_cpos),
 829				  ocfs2_rec_clusters(el, rec));
 830		goto out;
 831	}
 832
 833	if (ret_phys_blkno)
 834		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 835	if (ret_cpos)
 836		*ret_cpos = le32_to_cpu(rec->e_cpos);
 837	if (ret_clen)
 838		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 839
 840out:
 841	brelse(eb_bh);
 842	return ret;
 843}
 844
 845/*
 846 * Returns the block index, from the start of the cluster which this
 847 * hash belongs too.
 848 */
 849static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 850						   u32 minor_hash)
 851{
 852	return minor_hash & osb->osb_dx_mask;
 853}
 854
 855static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 856					  struct ocfs2_dx_hinfo *hinfo)
 857{
 858	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 859}
 860
 861static int ocfs2_dx_dir_lookup(struct inode *inode,
 862			       struct ocfs2_extent_list *el,
 863			       struct ocfs2_dx_hinfo *hinfo,
 864			       u32 *ret_cpos,
 865			       u64 *ret_phys_blkno)
 866{
 867	int ret = 0;
 868	unsigned int cend, uninitialized_var(clen);
 869	u32 uninitialized_var(cpos);
 870	u64 uninitialized_var(blkno);
 871	u32 name_hash = hinfo->major_hash;
 872
 873	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 874				      &clen);
 875	if (ret) {
 876		mlog_errno(ret);
 877		goto out;
 878	}
 879
 880	cend = cpos + clen;
 881	if (name_hash >= cend) {
 882		/* We want the last cluster */
 883		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 884		cpos += clen - 1;
 885	} else {
 886		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 887						  name_hash - cpos);
 888		cpos = name_hash;
 889	}
 890
 891	/*
 892	 * We now have the cluster which should hold our entry. To
 893	 * find the exact block from the start of the cluster to
 894	 * search, we take the lower bits of the hash.
 895	 */
 896	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 897
 898	if (ret_phys_blkno)
 899		*ret_phys_blkno = blkno;
 900	if (ret_cpos)
 901		*ret_cpos = cpos;
 902
 903out:
 904
 905	return ret;
 906}
 907
 908static int ocfs2_dx_dir_search(const char *name, int namelen,
 909			       struct inode *dir,
 910			       struct ocfs2_dx_root_block *dx_root,
 911			       struct ocfs2_dir_lookup_result *res)
 912{
 913	int ret, i, found;
 914	u64 uninitialized_var(phys);
 915	struct buffer_head *dx_leaf_bh = NULL;
 916	struct ocfs2_dx_leaf *dx_leaf;
 917	struct ocfs2_dx_entry *dx_entry = NULL;
 918	struct buffer_head *dir_ent_bh = NULL;
 919	struct ocfs2_dir_entry *dir_ent = NULL;
 920	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 921	struct ocfs2_extent_list *dr_el;
 922	struct ocfs2_dx_entry_list *entry_list;
 923
 924	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 925
 926	if (ocfs2_dx_root_inline(dx_root)) {
 927		entry_list = &dx_root->dr_entries;
 928		goto search;
 929	}
 930
 931	dr_el = &dx_root->dr_list;
 932
 933	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 934	if (ret) {
 935		mlog_errno(ret);
 936		goto out;
 937	}
 938
 939	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 940				  namelen, name, hinfo->major_hash,
 941				  hinfo->minor_hash, (unsigned long long)phys);
 942
 943	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 944	if (ret) {
 945		mlog_errno(ret);
 946		goto out;
 947	}
 948
 949	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 950
 951	trace_ocfs2_dx_dir_search_leaf_info(
 952			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 953			le16_to_cpu(dx_leaf->dl_list.de_count));
 954
 955	entry_list = &dx_leaf->dl_list;
 956
 957search:
 958	/*
 959	 * Empty leaf is legal, so no need to check for that.
 960	 */
 961	found = 0;
 962	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 963		dx_entry = &entry_list->de_entries[i];
 964
 965		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 966		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 967			continue;
 968
 969		/*
 970		 * Search unindexed leaf block now. We're not
 971		 * guaranteed to find anything.
 972		 */
 973		ret = ocfs2_read_dir_block_direct(dir,
 974					  le64_to_cpu(dx_entry->dx_dirent_blk),
 975					  &dir_ent_bh);
 976		if (ret) {
 977			mlog_errno(ret);
 978			goto out;
 979		}
 980
 981		/*
 982		 * XXX: We should check the unindexed block here,
 983		 * before using it.
 984		 */
 985
 986		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 987					      0, dir_ent_bh->b_data,
 988					      dir->i_sb->s_blocksize, &dir_ent);
 989		if (found == 1)
 990			break;
 991
 992		if (found == -1) {
 993			/* This means we found a bad directory entry. */
 994			ret = -EIO;
 995			mlog_errno(ret);
 996			goto out;
 997		}
 998
 999		brelse(dir_ent_bh);
1000		dir_ent_bh = NULL;
1001	}
1002
1003	if (found <= 0) {
1004		ret = -ENOENT;
1005		goto out;
1006	}
1007
1008	res->dl_leaf_bh = dir_ent_bh;
1009	res->dl_entry = dir_ent;
1010	res->dl_dx_leaf_bh = dx_leaf_bh;
1011	res->dl_dx_entry = dx_entry;
1012
1013	ret = 0;
1014out:
1015	if (ret) {
1016		brelse(dx_leaf_bh);
1017		brelse(dir_ent_bh);
1018	}
1019	return ret;
1020}
1021
1022static int ocfs2_find_entry_dx(const char *name, int namelen,
1023			       struct inode *dir,
1024			       struct ocfs2_dir_lookup_result *lookup)
1025{
1026	int ret;
1027	struct buffer_head *di_bh = NULL;
1028	struct ocfs2_dinode *di;
1029	struct buffer_head *dx_root_bh = NULL;
1030	struct ocfs2_dx_root_block *dx_root;
1031
1032	ret = ocfs2_read_inode_block(dir, &di_bh);
1033	if (ret) {
1034		mlog_errno(ret);
1035		goto out;
1036	}
1037
1038	di = (struct ocfs2_dinode *)di_bh->b_data;
1039
1040	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1041	if (ret) {
1042		mlog_errno(ret);
1043		goto out;
1044	}
1045	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1046
1047	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1048	if (ret) {
1049		if (ret != -ENOENT)
1050			mlog_errno(ret);
1051		goto out;
1052	}
1053
1054	lookup->dl_dx_root_bh = dx_root_bh;
1055	dx_root_bh = NULL;
1056out:
1057	brelse(di_bh);
1058	brelse(dx_root_bh);
1059	return ret;
1060}
1061
1062/*
1063 * Try to find an entry of the provided name within 'dir'.
1064 *
1065 * If nothing was found, -ENOENT is returned. Otherwise, zero is
1066 * returned and the struct 'res' will contain information useful to
1067 * other directory manipulation functions.
1068 *
1069 * Caller can NOT assume anything about the contents of the
1070 * buffer_heads - they are passed back only so that it can be passed
1071 * into any one of the manipulation functions (add entry, delete
1072 * entry, etc). As an example, bh in the extent directory case is a
1073 * data block, in the inline-data case it actually points to an inode,
1074 * in the indexed directory case, multiple buffers are involved.
1075 */
1076int ocfs2_find_entry(const char *name, int namelen,
1077		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1078{
1079	struct buffer_head *bh;
1080	struct ocfs2_dir_entry *res_dir = NULL;
 
1081
1082	if (ocfs2_dir_indexed(dir))
1083		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1084
 
 
 
 
 
1085	/*
1086	 * The unindexed dir code only uses part of the lookup
1087	 * structure, so there's no reason to push it down further
1088	 * than this.
1089	 */
1090	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 
 
 
 
 
1091		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1092	else
1093		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
 
1094
1095	if (bh == NULL)
1096		return -ENOENT;
1097
1098	lookup->dl_leaf_bh = bh;
1099	lookup->dl_entry = res_dir;
1100	return 0;
 
1101}
1102
1103/*
1104 * Update inode number and type of a previously found directory entry.
1105 */
1106int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1107		       struct ocfs2_dir_lookup_result *res,
1108		       struct inode *new_entry_inode)
1109{
1110	int ret;
1111	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1112	struct ocfs2_dir_entry *de = res->dl_entry;
1113	struct buffer_head *de_bh = res->dl_leaf_bh;
1114
1115	/*
1116	 * The same code works fine for both inline-data and extent
1117	 * based directories, so no need to split this up.  The only
1118	 * difference is the journal_access function.
1119	 */
1120
1121	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1122		access = ocfs2_journal_access_di;
1123
1124	ret = access(handle, INODE_CACHE(dir), de_bh,
1125		     OCFS2_JOURNAL_ACCESS_WRITE);
1126	if (ret) {
1127		mlog_errno(ret);
1128		goto out;
1129	}
1130
1131	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1132	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1133
1134	ocfs2_journal_dirty(handle, de_bh);
1135
1136out:
1137	return ret;
1138}
1139
1140/*
1141 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1142 * previous entry
1143 */
1144static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1145				struct ocfs2_dir_entry *de_del,
1146				struct buffer_head *bh, char *first_de,
1147				unsigned int bytes)
1148{
1149	struct ocfs2_dir_entry *de, *pde;
1150	int i, status = -ENOENT;
1151	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1152
1153	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1154		access = ocfs2_journal_access_di;
1155
1156	i = 0;
1157	pde = NULL;
1158	de = (struct ocfs2_dir_entry *) first_de;
1159	while (i < bytes) {
1160		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
1161			status = -EIO;
1162			mlog_errno(status);
1163			goto bail;
1164		}
1165		if (de == de_del)  {
1166			status = access(handle, INODE_CACHE(dir), bh,
1167					OCFS2_JOURNAL_ACCESS_WRITE);
1168			if (status < 0) {
1169				status = -EIO;
1170				mlog_errno(status);
1171				goto bail;
1172			}
1173			if (pde)
1174				le16_add_cpu(&pde->rec_len,
1175						le16_to_cpu(de->rec_len));
1176			de->inode = 0;
1177			dir->i_version++;
1178			ocfs2_journal_dirty(handle, bh);
1179			goto bail;
1180		}
1181		i += le16_to_cpu(de->rec_len);
1182		pde = de;
1183		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1184	}
1185bail:
1186	return status;
1187}
1188
1189static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1190{
1191	unsigned int hole;
1192
1193	if (le64_to_cpu(de->inode) == 0)
1194		hole = le16_to_cpu(de->rec_len);
1195	else
1196		hole = le16_to_cpu(de->rec_len) -
1197			OCFS2_DIR_REC_LEN(de->name_len);
1198
1199	return hole;
1200}
1201
1202static int ocfs2_find_max_rec_len(struct super_block *sb,
1203				  struct buffer_head *dirblock_bh)
1204{
1205	int size, this_hole, largest_hole = 0;
1206	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1207	struct ocfs2_dir_entry *de;
1208
1209	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1210	size = ocfs2_dir_trailer_blk_off(sb);
1211	limit = start + size;
1212	de_buf = start;
1213	de = (struct ocfs2_dir_entry *)de_buf;
1214	do {
1215		if (de_buf != trailer) {
1216			this_hole = ocfs2_figure_dirent_hole(de);
1217			if (this_hole > largest_hole)
1218				largest_hole = this_hole;
1219		}
1220
1221		de_buf += le16_to_cpu(de->rec_len);
1222		de = (struct ocfs2_dir_entry *)de_buf;
1223	} while (de_buf < limit);
1224
1225	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1226		return largest_hole;
1227	return 0;
1228}
1229
1230static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1231				       int index)
1232{
1233	int num_used = le16_to_cpu(entry_list->de_num_used);
1234
1235	if (num_used == 1 || index == (num_used - 1))
1236		goto clear;
1237
1238	memmove(&entry_list->de_entries[index],
1239		&entry_list->de_entries[index + 1],
1240		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1241clear:
1242	num_used--;
1243	memset(&entry_list->de_entries[num_used], 0,
1244	       sizeof(struct ocfs2_dx_entry));
1245	entry_list->de_num_used = cpu_to_le16(num_used);
1246}
1247
1248static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1249				 struct ocfs2_dir_lookup_result *lookup)
1250{
1251	int ret, index, max_rec_len, add_to_free_list = 0;
1252	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1253	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1254	struct ocfs2_dx_leaf *dx_leaf;
1255	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1256	struct ocfs2_dir_block_trailer *trailer;
1257	struct ocfs2_dx_root_block *dx_root;
1258	struct ocfs2_dx_entry_list *entry_list;
1259
1260	/*
1261	 * This function gets a bit messy because we might have to
1262	 * modify the root block, regardless of whether the indexed
1263	 * entries are stored inline.
1264	 */
1265
1266	/*
1267	 * *Only* set 'entry_list' here, based on where we're looking
1268	 * for the indexed entries. Later, we might still want to
1269	 * journal both blocks, based on free list state.
1270	 */
1271	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1272	if (ocfs2_dx_root_inline(dx_root)) {
1273		entry_list = &dx_root->dr_entries;
1274	} else {
1275		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1276		entry_list = &dx_leaf->dl_list;
1277	}
1278
1279	/* Neither of these are a disk corruption - that should have
1280	 * been caught by lookup, before we got here. */
1281	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1282	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1283
1284	index = (char *)dx_entry - (char *)entry_list->de_entries;
1285	index /= sizeof(*dx_entry);
1286
1287	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1288		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1289		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1290		     entry_list, dx_entry);
1291		return -EIO;
1292	}
1293
1294	/*
1295	 * We know that removal of this dirent will leave enough room
1296	 * for a new one, so add this block to the free list if it
1297	 * isn't already there.
1298	 */
1299	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1300	if (trailer->db_free_rec_len == 0)
1301		add_to_free_list = 1;
1302
1303	/*
1304	 * Add the block holding our index into the journal before
1305	 * removing the unindexed entry. If we get an error return
1306	 * from __ocfs2_delete_entry(), then it hasn't removed the
1307	 * entry yet. Likewise, successful return means we *must*
1308	 * remove the indexed entry.
1309	 *
1310	 * We're also careful to journal the root tree block here as
1311	 * the entry count needs to be updated. Also, we might be
1312	 * adding to the start of the free list.
1313	 */
1314	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1315				      OCFS2_JOURNAL_ACCESS_WRITE);
1316	if (ret) {
1317		mlog_errno(ret);
1318		goto out;
1319	}
1320
1321	if (!ocfs2_dx_root_inline(dx_root)) {
1322		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1323					      lookup->dl_dx_leaf_bh,
1324					      OCFS2_JOURNAL_ACCESS_WRITE);
1325		if (ret) {
1326			mlog_errno(ret);
1327			goto out;
1328		}
1329	}
1330
1331	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1332				    index);
1333
1334	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1335				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1336	if (ret) {
1337		mlog_errno(ret);
1338		goto out;
1339	}
1340
1341	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1342	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1343	if (add_to_free_list) {
1344		trailer->db_free_next = dx_root->dr_free_blk;
1345		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1346		ocfs2_journal_dirty(handle, dx_root_bh);
1347	}
1348
1349	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1350	ocfs2_journal_dirty(handle, leaf_bh);
1351
1352	le32_add_cpu(&dx_root->dr_num_entries, -1);
1353	ocfs2_journal_dirty(handle, dx_root_bh);
1354
1355	ocfs2_dx_list_remove_entry(entry_list, index);
1356
1357	if (!ocfs2_dx_root_inline(dx_root))
1358		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1359
1360out:
1361	return ret;
1362}
1363
1364static inline int ocfs2_delete_entry_id(handle_t *handle,
1365					struct inode *dir,
1366					struct ocfs2_dir_entry *de_del,
1367					struct buffer_head *bh)
1368{
1369	int ret;
1370	struct buffer_head *di_bh = NULL;
1371	struct ocfs2_dinode *di;
1372	struct ocfs2_inline_data *data;
1373
1374	ret = ocfs2_read_inode_block(dir, &di_bh);
1375	if (ret) {
1376		mlog_errno(ret);
1377		goto out;
1378	}
1379
1380	di = (struct ocfs2_dinode *)di_bh->b_data;
1381	data = &di->id2.i_data;
1382
1383	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1384				   i_size_read(dir));
1385
1386	brelse(di_bh);
1387out:
1388	return ret;
1389}
1390
1391static inline int ocfs2_delete_entry_el(handle_t *handle,
1392					struct inode *dir,
1393					struct ocfs2_dir_entry *de_del,
1394					struct buffer_head *bh)
1395{
1396	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1397				    bh->b_size);
1398}
1399
1400/*
1401 * Delete a directory entry. Hide the details of directory
1402 * implementation from the caller.
1403 */
1404int ocfs2_delete_entry(handle_t *handle,
1405		       struct inode *dir,
1406		       struct ocfs2_dir_lookup_result *res)
1407{
1408	if (ocfs2_dir_indexed(dir))
1409		return ocfs2_delete_entry_dx(handle, dir, res);
1410
1411	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1412		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1413					     res->dl_leaf_bh);
1414
1415	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1416				     res->dl_leaf_bh);
1417}
1418
1419/*
1420 * Check whether 'de' has enough room to hold an entry of
1421 * 'new_rec_len' bytes.
1422 */
1423static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1424					 unsigned int new_rec_len)
1425{
1426	unsigned int de_really_used;
1427
1428	/* Check whether this is an empty record with enough space */
1429	if (le64_to_cpu(de->inode) == 0 &&
1430	    le16_to_cpu(de->rec_len) >= new_rec_len)
1431		return 1;
1432
1433	/*
1434	 * Record might have free space at the end which we can
1435	 * use.
1436	 */
1437	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1438	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1439	    return 1;
1440
1441	return 0;
1442}
1443
1444static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1445					  struct ocfs2_dx_entry *dx_new_entry)
1446{
1447	int i;
1448
1449	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1450	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1451
1452	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1453}
1454
1455static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1456				       struct ocfs2_dx_hinfo *hinfo,
1457				       u64 dirent_blk)
1458{
1459	int i;
1460	struct ocfs2_dx_entry *dx_entry;
1461
1462	i = le16_to_cpu(entry_list->de_num_used);
1463	dx_entry = &entry_list->de_entries[i];
1464
1465	memset(dx_entry, 0, sizeof(*dx_entry));
1466	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1467	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1468	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1469
1470	le16_add_cpu(&entry_list->de_num_used, 1);
1471}
1472
1473static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1474				      struct ocfs2_dx_hinfo *hinfo,
1475				      u64 dirent_blk,
1476				      struct buffer_head *dx_leaf_bh)
1477{
1478	int ret;
1479	struct ocfs2_dx_leaf *dx_leaf;
1480
1481	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1482				      OCFS2_JOURNAL_ACCESS_WRITE);
1483	if (ret) {
1484		mlog_errno(ret);
1485		goto out;
1486	}
1487
1488	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1489	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1490	ocfs2_journal_dirty(handle, dx_leaf_bh);
1491
1492out:
1493	return ret;
1494}
1495
1496static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1497					struct ocfs2_dx_hinfo *hinfo,
1498					u64 dirent_blk,
1499					struct ocfs2_dx_root_block *dx_root)
1500{
1501	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1502}
1503
1504static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1505			       struct ocfs2_dir_lookup_result *lookup)
1506{
1507	int ret = 0;
1508	struct ocfs2_dx_root_block *dx_root;
1509	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1510
1511	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1512				      OCFS2_JOURNAL_ACCESS_WRITE);
1513	if (ret) {
1514		mlog_errno(ret);
1515		goto out;
1516	}
1517
1518	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1519	if (ocfs2_dx_root_inline(dx_root)) {
1520		ocfs2_dx_inline_root_insert(dir, handle,
1521					    &lookup->dl_hinfo,
1522					    lookup->dl_leaf_bh->b_blocknr,
1523					    dx_root);
1524	} else {
1525		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1526						 lookup->dl_leaf_bh->b_blocknr,
1527						 lookup->dl_dx_leaf_bh);
1528		if (ret)
1529			goto out;
1530	}
1531
1532	le32_add_cpu(&dx_root->dr_num_entries, 1);
1533	ocfs2_journal_dirty(handle, dx_root_bh);
1534
1535out:
1536	return ret;
1537}
1538
1539static void ocfs2_remove_block_from_free_list(struct inode *dir,
1540				       handle_t *handle,
1541				       struct ocfs2_dir_lookup_result *lookup)
1542{
1543	struct ocfs2_dir_block_trailer *trailer, *prev;
1544	struct ocfs2_dx_root_block *dx_root;
1545	struct buffer_head *bh;
1546
1547	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1548
1549	if (ocfs2_free_list_at_root(lookup)) {
1550		bh = lookup->dl_dx_root_bh;
1551		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1552		dx_root->dr_free_blk = trailer->db_free_next;
1553	} else {
1554		bh = lookup->dl_prev_leaf_bh;
1555		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1556		prev->db_free_next = trailer->db_free_next;
1557	}
1558
1559	trailer->db_free_rec_len = cpu_to_le16(0);
1560	trailer->db_free_next = cpu_to_le64(0);
1561
1562	ocfs2_journal_dirty(handle, bh);
1563	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1564}
1565
1566/*
1567 * This expects that a journal write has been reserved on
1568 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1569 */
1570static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1571				   struct ocfs2_dir_lookup_result *lookup)
1572{
1573	int max_rec_len;
1574	struct ocfs2_dir_block_trailer *trailer;
1575
1576	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1577	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1578	if (max_rec_len) {
1579		/*
1580		 * There's still room in this block, so no need to remove it
1581		 * from the free list. In this case, we just want to update
1582		 * the rec len accounting.
1583		 */
1584		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1585		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1586		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1587	} else {
1588		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1589	}
1590}
1591
1592/* we don't always have a dentry for what we want to add, so people
1593 * like orphan dir can call this instead.
1594 *
1595 * The lookup context must have been filled from
1596 * ocfs2_prepare_dir_for_insert.
1597 */
1598int __ocfs2_add_entry(handle_t *handle,
1599		      struct inode *dir,
1600		      const char *name, int namelen,
1601		      struct inode *inode, u64 blkno,
1602		      struct buffer_head *parent_fe_bh,
1603		      struct ocfs2_dir_lookup_result *lookup)
1604{
1605	unsigned long offset;
1606	unsigned short rec_len;
1607	struct ocfs2_dir_entry *de, *de1;
1608	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1609	struct super_block *sb = dir->i_sb;
1610	int retval;
1611	unsigned int size = sb->s_blocksize;
1612	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1613	char *data_start = insert_bh->b_data;
1614
1615	if (!namelen)
1616		return -EINVAL;
1617
1618	if (ocfs2_dir_indexed(dir)) {
1619		struct buffer_head *bh;
1620
1621		/*
1622		 * An indexed dir may require that we update the free space
1623		 * list. Reserve a write to the previous node in the list so
1624		 * that we don't fail later.
1625		 *
1626		 * XXX: This can be either a dx_root_block, or an unindexed
1627		 * directory tree leaf block.
1628		 */
1629		if (ocfs2_free_list_at_root(lookup)) {
1630			bh = lookup->dl_dx_root_bh;
1631			retval = ocfs2_journal_access_dr(handle,
1632						 INODE_CACHE(dir), bh,
1633						 OCFS2_JOURNAL_ACCESS_WRITE);
1634		} else {
1635			bh = lookup->dl_prev_leaf_bh;
1636			retval = ocfs2_journal_access_db(handle,
1637						 INODE_CACHE(dir), bh,
1638						 OCFS2_JOURNAL_ACCESS_WRITE);
1639		}
1640		if (retval) {
1641			mlog_errno(retval);
1642			return retval;
1643		}
1644	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1645		data_start = di->id2.i_data.id_data;
1646		size = i_size_read(dir);
1647
1648		BUG_ON(insert_bh != parent_fe_bh);
1649	}
1650
1651	rec_len = OCFS2_DIR_REC_LEN(namelen);
1652	offset = 0;
1653	de = (struct ocfs2_dir_entry *) data_start;
1654	while (1) {
1655		BUG_ON((char *)de >= (size + data_start));
1656
1657		/* These checks should've already been passed by the
1658		 * prepare function, but I guess we can leave them
1659		 * here anyway. */
1660		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
 
1661			retval = -ENOENT;
1662			goto bail;
1663		}
1664		if (ocfs2_match(namelen, name, de)) {
1665			retval = -EEXIST;
1666			goto bail;
1667		}
1668
1669		/* We're guaranteed that we should have space, so we
1670		 * can't possibly have hit the trailer...right? */
1671		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1672				"Hit dir trailer trying to insert %.*s "
1673			        "(namelen %d) into directory %llu.  "
1674				"offset is %lu, trailer offset is %d\n",
1675				namelen, name, namelen,
1676				(unsigned long long)parent_fe_bh->b_blocknr,
1677				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1678
1679		if (ocfs2_dirent_would_fit(de, rec_len)) {
1680			dir->i_mtime = dir->i_ctime = current_time(dir);
 
1681			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1682			if (retval < 0) {
1683				mlog_errno(retval);
1684				goto bail;
1685			}
1686
1687			if (insert_bh == parent_fe_bh)
1688				retval = ocfs2_journal_access_di(handle,
1689								 INODE_CACHE(dir),
1690								 insert_bh,
1691								 OCFS2_JOURNAL_ACCESS_WRITE);
1692			else {
1693				retval = ocfs2_journal_access_db(handle,
1694								 INODE_CACHE(dir),
1695								 insert_bh,
1696					      OCFS2_JOURNAL_ACCESS_WRITE);
1697
1698				if (!retval && ocfs2_dir_indexed(dir))
1699					retval = ocfs2_dx_dir_insert(dir,
1700								handle,
1701								lookup);
1702			}
1703
1704			if (retval) {
1705				mlog_errno(retval);
1706				goto bail;
1707			}
1708
1709			/* By now the buffer is marked for journaling */
1710			offset += le16_to_cpu(de->rec_len);
1711			if (le64_to_cpu(de->inode)) {
1712				de1 = (struct ocfs2_dir_entry *)((char *) de +
1713					OCFS2_DIR_REC_LEN(de->name_len));
1714				de1->rec_len =
1715					cpu_to_le16(le16_to_cpu(de->rec_len) -
1716					OCFS2_DIR_REC_LEN(de->name_len));
1717				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1718				de = de1;
1719			}
1720			de->file_type = OCFS2_FT_UNKNOWN;
1721			if (blkno) {
1722				de->inode = cpu_to_le64(blkno);
1723				ocfs2_set_de_type(de, inode->i_mode);
1724			} else
1725				de->inode = 0;
1726			de->name_len = namelen;
1727			memcpy(de->name, name, namelen);
1728
1729			if (ocfs2_dir_indexed(dir))
1730				ocfs2_recalc_free_list(dir, handle, lookup);
1731
1732			dir->i_version++;
1733			ocfs2_journal_dirty(handle, insert_bh);
1734			retval = 0;
1735			goto bail;
1736		}
1737
1738		offset += le16_to_cpu(de->rec_len);
1739		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1740	}
1741
1742	/* when you think about it, the assert above should prevent us
1743	 * from ever getting here. */
1744	retval = -ENOSPC;
1745bail:
1746	if (retval)
1747		mlog_errno(retval);
1748
1749	return retval;
1750}
1751
1752static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1753				    u64 *f_version,
1754				    struct dir_context *ctx)
1755{
1756	int ret, i;
1757	unsigned long offset = ctx->pos;
1758	struct buffer_head *di_bh = NULL;
1759	struct ocfs2_dinode *di;
1760	struct ocfs2_inline_data *data;
1761	struct ocfs2_dir_entry *de;
1762
1763	ret = ocfs2_read_inode_block(inode, &di_bh);
1764	if (ret) {
1765		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1766		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1767		goto out;
1768	}
1769
1770	di = (struct ocfs2_dinode *)di_bh->b_data;
1771	data = &di->id2.i_data;
1772
1773	while (ctx->pos < i_size_read(inode)) {
1774		/* If the dir block has changed since the last call to
1775		 * readdir(2), then we might be pointing to an invalid
1776		 * dirent right now.  Scan from the start of the block
1777		 * to make sure. */
1778		if (*f_version != inode->i_version) {
1779			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1780				de = (struct ocfs2_dir_entry *)
1781					(data->id_data + i);
1782				/* It's too expensive to do a full
1783				 * dirent test each time round this
1784				 * loop, but we do have to test at
1785				 * least that it is non-zero.  A
1786				 * failure will be detected in the
1787				 * dirent test below. */
1788				if (le16_to_cpu(de->rec_len) <
1789				    OCFS2_DIR_REC_LEN(1))
1790					break;
1791				i += le16_to_cpu(de->rec_len);
1792			}
1793			ctx->pos = offset = i;
1794			*f_version = inode->i_version;
1795		}
1796
1797		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1798		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
 
1799			/* On error, skip the f_pos to the end. */
1800			ctx->pos = i_size_read(inode);
1801			break;
1802		}
1803		offset += le16_to_cpu(de->rec_len);
1804		if (le64_to_cpu(de->inode)) {
1805			unsigned char d_type = DT_UNKNOWN;
1806
1807			if (de->file_type < OCFS2_FT_MAX)
1808				d_type = ocfs2_filetype_table[de->file_type];
1809
1810			if (!dir_emit(ctx, de->name, de->name_len,
1811				      le64_to_cpu(de->inode), d_type))
 
1812				goto out;
1813		}
1814		ctx->pos += le16_to_cpu(de->rec_len);
1815	}
1816out:
1817	brelse(di_bh);
1818	return 0;
1819}
1820
1821/*
1822 * NOTE: This function can be called against unindexed directories,
1823 * and indexed ones.
1824 */
1825static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1826				    u64 *f_version,
1827				    struct dir_context *ctx,
1828				    bool persist)
1829{
1830	unsigned long offset, blk, last_ra_blk = 0;
1831	int i;
1832	struct buffer_head * bh, * tmp;
1833	struct ocfs2_dir_entry * de;
1834	struct super_block * sb = inode->i_sb;
1835	unsigned int ra_sectors = 16;
1836	int stored = 0;
1837
1838	bh = NULL;
1839
1840	offset = ctx->pos & (sb->s_blocksize - 1);
1841
1842	while (ctx->pos < i_size_read(inode)) {
1843		blk = ctx->pos >> sb->s_blocksize_bits;
1844		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1845			/* Skip the corrupt dirblock and keep trying */
1846			ctx->pos += sb->s_blocksize - offset;
1847			continue;
1848		}
1849
1850		/* The idea here is to begin with 8k read-ahead and to stay
1851		 * 4k ahead of our current position.
1852		 *
1853		 * TODO: Use the pagecache for this. We just need to
1854		 * make sure it's cluster-safe... */
1855		if (!last_ra_blk
1856		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1857			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1858			     i > 0; i--) {
1859				tmp = NULL;
1860				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1861							  OCFS2_BH_READAHEAD))
1862					brelse(tmp);
1863			}
1864			last_ra_blk = blk;
1865			ra_sectors = 8;
1866		}
1867
1868		/* If the dir block has changed since the last call to
1869		 * readdir(2), then we might be pointing to an invalid
1870		 * dirent right now.  Scan from the start of the block
1871		 * to make sure. */
1872		if (*f_version != inode->i_version) {
1873			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1874				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1875				/* It's too expensive to do a full
1876				 * dirent test each time round this
1877				 * loop, but we do have to test at
1878				 * least that it is non-zero.  A
1879				 * failure will be detected in the
1880				 * dirent test below. */
1881				if (le16_to_cpu(de->rec_len) <
1882				    OCFS2_DIR_REC_LEN(1))
1883					break;
1884				i += le16_to_cpu(de->rec_len);
1885			}
1886			offset = i;
1887			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1888				| offset;
1889			*f_version = inode->i_version;
1890		}
1891
1892		while (ctx->pos < i_size_read(inode)
1893		       && offset < sb->s_blocksize) {
1894			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1895			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
 
1896				/* On error, skip the f_pos to the
1897				   next block. */
1898				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1899				brelse(bh);
1900				continue;
1901			}
1902			if (le64_to_cpu(de->inode)) {
1903				unsigned char d_type = DT_UNKNOWN;
1904
1905				if (de->file_type < OCFS2_FT_MAX)
1906					d_type = ocfs2_filetype_table[de->file_type];
1907				if (!dir_emit(ctx, de->name,
1908						de->name_len,
1909						le64_to_cpu(de->inode),
1910						d_type)) {
1911					brelse(bh);
1912					return 0;
1913				}
1914				stored++;
1915			}
1916			offset += le16_to_cpu(de->rec_len);
1917			ctx->pos += le16_to_cpu(de->rec_len);
1918		}
1919		offset = 0;
1920		brelse(bh);
1921		bh = NULL;
1922		if (!persist && stored)
1923			break;
1924	}
1925	return 0;
1926}
1927
1928static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1929				 struct dir_context *ctx,
1930				 bool persist)
1931{
1932	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1933		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1934	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1935}
1936
1937/*
1938 * This is intended to be called from inside other kernel functions,
1939 * so we fake some arguments.
1940 */
1941int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1942{
1943	u64 version = inode->i_version;
1944	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1945	return 0;
1946}
1947
1948/*
1949 * ocfs2_readdir()
1950 *
1951 */
1952int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1953{
1954	int error = 0;
1955	struct inode *inode = file_inode(file);
 
1956	int lock_level = 0;
1957
1958	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1959
1960	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
1961	if (lock_level && error >= 0) {
1962		/* We release EX lock which used to update atime
1963		 * and get PR lock again to reduce contention
1964		 * on commonly accessed directories. */
1965		ocfs2_inode_unlock(inode, 1);
1966		lock_level = 0;
1967		error = ocfs2_inode_lock(inode, NULL, 0);
1968	}
1969	if (error < 0) {
1970		if (error != -ENOENT)
1971			mlog_errno(error);
1972		/* we haven't got any yet, so propagate the error. */
1973		goto bail_nolock;
1974	}
1975
1976	error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
1977
1978	ocfs2_inode_unlock(inode, lock_level);
1979	if (error)
1980		mlog_errno(error);
1981
1982bail_nolock:
1983
1984	return error;
1985}
1986
1987/*
1988 * NOTE: this should always be called with parent dir i_mutex taken.
1989 */
1990int ocfs2_find_files_on_disk(const char *name,
1991			     int namelen,
1992			     u64 *blkno,
1993			     struct inode *inode,
1994			     struct ocfs2_dir_lookup_result *lookup)
1995{
1996	int status = -ENOENT;
1997
1998	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1999				(unsigned long long)OCFS2_I(inode)->ip_blkno);
2000
2001	status = ocfs2_find_entry(name, namelen, inode, lookup);
2002	if (status)
2003		goto leave;
2004
2005	*blkno = le64_to_cpu(lookup->dl_entry->inode);
2006
2007	status = 0;
2008leave:
2009
2010	return status;
2011}
2012
2013/*
2014 * Convenience function for callers which just want the block number
2015 * mapped to a name and don't require the full dirent info, etc.
2016 */
2017int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
2018			       int namelen, u64 *blkno)
2019{
2020	int ret;
2021	struct ocfs2_dir_lookup_result lookup = { NULL, };
2022
2023	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
2024	ocfs2_free_dir_lookup_result(&lookup);
2025
2026	return ret;
2027}
2028
2029/* Check for a name within a directory.
2030 *
2031 * Return 0 if the name does not exist
2032 * Return -EEXIST if the directory contains the name
 
2033 *
2034 * Callers should have i_mutex + a cluster lock on dir
2035 */
2036int ocfs2_check_dir_for_entry(struct inode *dir,
2037			      const char *name,
2038			      int namelen)
2039{
2040	int ret = 0;
2041	struct ocfs2_dir_lookup_result lookup = { NULL, };
2042
2043	trace_ocfs2_check_dir_for_entry(
2044		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2045
2046	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
 
2047		ret = -EEXIST;
2048		mlog_errno(ret);
 
 
2049	}
2050
2051	ocfs2_free_dir_lookup_result(&lookup);
2052
2053	return ret;
2054}
2055
2056struct ocfs2_empty_dir_priv {
2057	struct dir_context ctx;
2058	unsigned seen_dot;
2059	unsigned seen_dot_dot;
2060	unsigned seen_other;
2061	unsigned dx_dir;
2062};
2063static int ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2064				   int name_len, loff_t pos, u64 ino,
2065				   unsigned type)
2066{
2067	struct ocfs2_empty_dir_priv *p =
2068		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2069
2070	/*
2071	 * Check the positions of "." and ".." records to be sure
2072	 * they're in the correct place.
2073	 *
2074	 * Indexed directories don't need to proceed past the first
2075	 * two entries, so we end the scan after seeing '..'. Despite
2076	 * that, we allow the scan to proceed In the event that we
2077	 * have a corrupted indexed directory (no dot or dot dot
2078	 * entries). This allows us to double check for existing
2079	 * entries which might not have been found in the index.
2080	 */
2081	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2082		p->seen_dot = 1;
2083		return 0;
2084	}
2085
2086	if (name_len == 2 && !strncmp("..", name, 2) &&
2087	    pos == OCFS2_DIR_REC_LEN(1)) {
2088		p->seen_dot_dot = 1;
2089
2090		if (p->dx_dir && p->seen_dot)
2091			return 1;
2092
2093		return 0;
2094	}
2095
2096	p->seen_other = 1;
2097	return 1;
2098}
2099
2100static int ocfs2_empty_dir_dx(struct inode *inode,
2101			      struct ocfs2_empty_dir_priv *priv)
2102{
2103	int ret;
2104	struct buffer_head *di_bh = NULL;
2105	struct buffer_head *dx_root_bh = NULL;
2106	struct ocfs2_dinode *di;
2107	struct ocfs2_dx_root_block *dx_root;
2108
2109	priv->dx_dir = 1;
2110
2111	ret = ocfs2_read_inode_block(inode, &di_bh);
2112	if (ret) {
2113		mlog_errno(ret);
2114		goto out;
2115	}
2116	di = (struct ocfs2_dinode *)di_bh->b_data;
2117
2118	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2119	if (ret) {
2120		mlog_errno(ret);
2121		goto out;
2122	}
2123	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2124
2125	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2126		priv->seen_other = 1;
2127
2128out:
2129	brelse(di_bh);
2130	brelse(dx_root_bh);
2131	return ret;
2132}
2133
2134/*
2135 * routine to check that the specified directory is empty (for rmdir)
2136 *
2137 * Returns 1 if dir is empty, zero otherwise.
2138 *
2139 * XXX: This is a performance problem for unindexed directories.
2140 */
2141int ocfs2_empty_dir(struct inode *inode)
2142{
2143	int ret;
2144	struct ocfs2_empty_dir_priv priv = {
2145		.ctx.actor = ocfs2_empty_dir_filldir,
2146	};
2147
2148	if (ocfs2_dir_indexed(inode)) {
2149		ret = ocfs2_empty_dir_dx(inode, &priv);
2150		if (ret)
2151			mlog_errno(ret);
2152		/*
2153		 * We still run ocfs2_dir_foreach to get the checks
2154		 * for "." and "..".
2155		 */
2156	}
2157
2158	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2159	if (ret)
2160		mlog_errno(ret);
2161
2162	if (!priv.seen_dot || !priv.seen_dot_dot) {
2163		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2164		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2165		/*
2166		 * XXX: Is it really safe to allow an unlink to continue?
2167		 */
2168		return 1;
2169	}
2170
2171	return !priv.seen_other;
2172}
2173
2174/*
2175 * Fills "." and ".." dirents in a new directory block. Returns dirent for
2176 * "..", which might be used during creation of a directory with a trailing
2177 * header. It is otherwise safe to ignore the return code.
2178 */
2179static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2180							  struct inode *parent,
2181							  char *start,
2182							  unsigned int size)
2183{
2184	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2185
2186	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2187	de->name_len = 1;
2188	de->rec_len =
2189		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2190	strcpy(de->name, ".");
2191	ocfs2_set_de_type(de, S_IFDIR);
2192
2193	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2194	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2195	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2196	de->name_len = 2;
2197	strcpy(de->name, "..");
2198	ocfs2_set_de_type(de, S_IFDIR);
2199
2200	return de;
2201}
2202
2203/*
2204 * This works together with code in ocfs2_mknod_locked() which sets
2205 * the inline-data flag and initializes the inline-data section.
2206 */
2207static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2208				 handle_t *handle,
2209				 struct inode *parent,
2210				 struct inode *inode,
2211				 struct buffer_head *di_bh)
2212{
2213	int ret;
2214	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2215	struct ocfs2_inline_data *data = &di->id2.i_data;
2216	unsigned int size = le16_to_cpu(data->id_count);
2217
2218	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2219				      OCFS2_JOURNAL_ACCESS_WRITE);
2220	if (ret) {
2221		mlog_errno(ret);
2222		goto out;
2223	}
2224
2225	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2226	ocfs2_journal_dirty(handle, di_bh);
2227
2228	i_size_write(inode, size);
2229	set_nlink(inode, 2);
2230	inode->i_blocks = ocfs2_inode_sector_count(inode);
2231
2232	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2233	if (ret < 0)
2234		mlog_errno(ret);
2235
2236out:
2237	return ret;
2238}
2239
2240static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2241				 handle_t *handle,
2242				 struct inode *parent,
2243				 struct inode *inode,
2244				 struct buffer_head *fe_bh,
2245				 struct ocfs2_alloc_context *data_ac,
2246				 struct buffer_head **ret_new_bh)
2247{
2248	int status;
2249	unsigned int size = osb->sb->s_blocksize;
2250	struct buffer_head *new_bh = NULL;
2251	struct ocfs2_dir_entry *de;
2252
2253	if (ocfs2_new_dir_wants_trailer(inode))
2254		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2255
2256	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2257				     data_ac, NULL, &new_bh);
2258	if (status < 0) {
2259		mlog_errno(status);
2260		goto bail;
2261	}
2262
2263	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2264
2265	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2266					 OCFS2_JOURNAL_ACCESS_CREATE);
2267	if (status < 0) {
2268		mlog_errno(status);
2269		goto bail;
2270	}
2271	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2272
2273	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2274	if (ocfs2_new_dir_wants_trailer(inode)) {
2275		int size = le16_to_cpu(de->rec_len);
2276
2277		/*
2278		 * Figure out the size of the hole left over after
2279		 * insertion of '.' and '..'. The trailer wants this
2280		 * information.
2281		 */
2282		size -= OCFS2_DIR_REC_LEN(2);
2283		size -= sizeof(struct ocfs2_dir_block_trailer);
2284
2285		ocfs2_init_dir_trailer(inode, new_bh, size);
2286	}
2287
2288	ocfs2_journal_dirty(handle, new_bh);
2289
2290	i_size_write(inode, inode->i_sb->s_blocksize);
2291	set_nlink(inode, 2);
2292	inode->i_blocks = ocfs2_inode_sector_count(inode);
2293	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2294	if (status < 0) {
2295		mlog_errno(status);
2296		goto bail;
2297	}
2298
2299	status = 0;
2300	if (ret_new_bh) {
2301		*ret_new_bh = new_bh;
2302		new_bh = NULL;
2303	}
2304bail:
2305	brelse(new_bh);
2306
2307	return status;
2308}
2309
2310static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2311				     handle_t *handle, struct inode *dir,
2312				     struct buffer_head *di_bh,
2313				     struct buffer_head *dirdata_bh,
2314				     struct ocfs2_alloc_context *meta_ac,
2315				     int dx_inline, u32 num_entries,
2316				     struct buffer_head **ret_dx_root_bh)
2317{
2318	int ret;
2319	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2320	u16 dr_suballoc_bit;
2321	u64 suballoc_loc, dr_blkno;
2322	unsigned int num_bits;
2323	struct buffer_head *dx_root_bh = NULL;
2324	struct ocfs2_dx_root_block *dx_root;
2325	struct ocfs2_dir_block_trailer *trailer =
2326		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2327
2328	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2329				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2330	if (ret) {
2331		mlog_errno(ret);
2332		goto out;
2333	}
2334
2335	trace_ocfs2_dx_dir_attach_index(
2336				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2337				(unsigned long long)dr_blkno);
2338
2339	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2340	if (dx_root_bh == NULL) {
2341		ret = -ENOMEM;
2342		goto out;
2343	}
2344	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2345
2346	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2347				      OCFS2_JOURNAL_ACCESS_CREATE);
2348	if (ret < 0) {
2349		mlog_errno(ret);
2350		goto out;
2351	}
2352
2353	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2354	memset(dx_root, 0, osb->sb->s_blocksize);
2355	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2356	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2357	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2358	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2359	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2360	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2361	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2362	dx_root->dr_num_entries = cpu_to_le32(num_entries);
2363	if (le16_to_cpu(trailer->db_free_rec_len))
2364		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2365	else
2366		dx_root->dr_free_blk = cpu_to_le64(0);
2367
2368	if (dx_inline) {
2369		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2370		dx_root->dr_entries.de_count =
2371			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2372	} else {
2373		dx_root->dr_list.l_count =
2374			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2375	}
2376	ocfs2_journal_dirty(handle, dx_root_bh);
2377
2378	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2379				      OCFS2_JOURNAL_ACCESS_CREATE);
2380	if (ret) {
2381		mlog_errno(ret);
2382		goto out;
2383	}
2384
2385	di->i_dx_root = cpu_to_le64(dr_blkno);
2386
2387	spin_lock(&OCFS2_I(dir)->ip_lock);
2388	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2389	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2390	spin_unlock(&OCFS2_I(dir)->ip_lock);
2391
2392	ocfs2_journal_dirty(handle, di_bh);
2393
2394	*ret_dx_root_bh = dx_root_bh;
2395	dx_root_bh = NULL;
2396
2397out:
2398	brelse(dx_root_bh);
2399	return ret;
2400}
2401
2402static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2403				       handle_t *handle, struct inode *dir,
2404				       struct buffer_head **dx_leaves,
2405				       int num_dx_leaves, u64 start_blk)
2406{
2407	int ret, i;
2408	struct ocfs2_dx_leaf *dx_leaf;
2409	struct buffer_head *bh;
2410
2411	for (i = 0; i < num_dx_leaves; i++) {
2412		bh = sb_getblk(osb->sb, start_blk + i);
2413		if (bh == NULL) {
2414			ret = -ENOMEM;
2415			goto out;
2416		}
2417		dx_leaves[i] = bh;
2418
2419		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2420
2421		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2422					      OCFS2_JOURNAL_ACCESS_CREATE);
2423		if (ret < 0) {
2424			mlog_errno(ret);
2425			goto out;
2426		}
2427
2428		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2429
2430		memset(dx_leaf, 0, osb->sb->s_blocksize);
2431		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2432		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2433		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2434		dx_leaf->dl_list.de_count =
2435			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2436
2437		trace_ocfs2_dx_dir_format_cluster(
2438				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2439				(unsigned long long)bh->b_blocknr,
2440				le16_to_cpu(dx_leaf->dl_list.de_count));
2441
2442		ocfs2_journal_dirty(handle, bh);
2443	}
2444
2445	ret = 0;
2446out:
2447	return ret;
2448}
2449
2450/*
2451 * Allocates and formats a new cluster for use in an indexed dir
2452 * leaf. This version will not do the extent insert, so that it can be
2453 * used by operations which need careful ordering.
2454 */
2455static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2456				      u32 cpos, handle_t *handle,
2457				      struct ocfs2_alloc_context *data_ac,
2458				      struct buffer_head **dx_leaves,
2459				      int num_dx_leaves, u64 *ret_phys_blkno)
2460{
2461	int ret;
2462	u32 phys, num;
2463	u64 phys_blkno;
2464	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2465
2466	/*
2467	 * XXX: For create, this should claim cluster for the index
2468	 * *before* the unindexed insert so that we have a better
2469	 * chance of contiguousness as the directory grows in number
2470	 * of entries.
2471	 */
2472	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2473	if (ret) {
2474		mlog_errno(ret);
2475		goto out;
2476	}
2477
2478	/*
2479	 * Format the new cluster first. That way, we're inserting
2480	 * valid data.
2481	 */
2482	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2483	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2484					  num_dx_leaves, phys_blkno);
2485	if (ret) {
2486		mlog_errno(ret);
2487		goto out;
2488	}
2489
2490	*ret_phys_blkno = phys_blkno;
2491out:
2492	return ret;
2493}
2494
2495static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2496				    struct ocfs2_extent_tree *et,
2497				    u32 cpos, handle_t *handle,
2498				    struct ocfs2_alloc_context *data_ac,
2499				    struct ocfs2_alloc_context *meta_ac,
2500				    struct buffer_head **dx_leaves,
2501				    int num_dx_leaves)
2502{
2503	int ret;
2504	u64 phys_blkno;
2505
2506	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2507					 num_dx_leaves, &phys_blkno);
2508	if (ret) {
2509		mlog_errno(ret);
2510		goto out;
2511	}
2512
2513	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2514				  meta_ac);
2515	if (ret)
2516		mlog_errno(ret);
2517out:
2518	return ret;
2519}
2520
2521static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2522							int *ret_num_leaves)
2523{
2524	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2525	struct buffer_head **dx_leaves;
2526
2527	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2528			    GFP_NOFS);
2529	if (dx_leaves && ret_num_leaves)
2530		*ret_num_leaves = num_dx_leaves;
2531
2532	return dx_leaves;
2533}
2534
2535static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2536				 handle_t *handle,
2537				 struct inode *parent,
2538				 struct inode *inode,
2539				 struct buffer_head *di_bh,
2540				 struct ocfs2_alloc_context *data_ac,
2541				 struct ocfs2_alloc_context *meta_ac)
2542{
2543	int ret;
2544	struct buffer_head *leaf_bh = NULL;
2545	struct buffer_head *dx_root_bh = NULL;
2546	struct ocfs2_dx_hinfo hinfo;
2547	struct ocfs2_dx_root_block *dx_root;
2548	struct ocfs2_dx_entry_list *entry_list;
2549
2550	/*
2551	 * Our strategy is to create the directory as though it were
2552	 * unindexed, then add the index block. This works with very
2553	 * little complication since the state of a new directory is a
2554	 * very well known quantity.
2555	 *
2556	 * Essentially, we have two dirents ("." and ".."), in the 1st
2557	 * block which need indexing. These are easily inserted into
2558	 * the index block.
2559	 */
2560
2561	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2562				    data_ac, &leaf_bh);
2563	if (ret) {
2564		mlog_errno(ret);
2565		goto out;
2566	}
2567
2568	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2569					meta_ac, 1, 2, &dx_root_bh);
2570	if (ret) {
2571		mlog_errno(ret);
2572		goto out;
2573	}
2574	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2575	entry_list = &dx_root->dr_entries;
2576
2577	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2578	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2579	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2580
2581	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2582	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2583
2584out:
2585	brelse(dx_root_bh);
2586	brelse(leaf_bh);
2587	return ret;
2588}
2589
2590int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2591		       handle_t *handle,
2592		       struct inode *parent,
2593		       struct inode *inode,
2594		       struct buffer_head *fe_bh,
2595		       struct ocfs2_alloc_context *data_ac,
2596		       struct ocfs2_alloc_context *meta_ac)
2597
2598{
2599	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2600
2601	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2602		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2603
2604	if (ocfs2_supports_indexed_dirs(osb))
2605		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2606					     data_ac, meta_ac);
2607
2608	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2609				     data_ac, NULL);
2610}
2611
2612static int ocfs2_dx_dir_index_block(struct inode *dir,
2613				    handle_t *handle,
2614				    struct buffer_head **dx_leaves,
2615				    int num_dx_leaves,
2616				    u32 *num_dx_entries,
2617				    struct buffer_head *dirent_bh)
2618{
2619	int ret = 0, namelen, i;
2620	char *de_buf, *limit;
2621	struct ocfs2_dir_entry *de;
2622	struct buffer_head *dx_leaf_bh;
2623	struct ocfs2_dx_hinfo hinfo;
2624	u64 dirent_blk = dirent_bh->b_blocknr;
2625
2626	de_buf = dirent_bh->b_data;
2627	limit = de_buf + dir->i_sb->s_blocksize;
2628
2629	while (de_buf < limit) {
2630		de = (struct ocfs2_dir_entry *)de_buf;
2631
2632		namelen = de->name_len;
2633		if (!namelen || !de->inode)
2634			goto inc;
2635
2636		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2637
2638		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2639		dx_leaf_bh = dx_leaves[i];
2640
2641		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2642						 dirent_blk, dx_leaf_bh);
2643		if (ret) {
2644			mlog_errno(ret);
2645			goto out;
2646		}
2647
2648		*num_dx_entries = *num_dx_entries + 1;
2649
2650inc:
2651		de_buf += le16_to_cpu(de->rec_len);
2652	}
2653
2654out:
2655	return ret;
2656}
2657
2658/*
2659 * XXX: This expects dx_root_bh to already be part of the transaction.
2660 */
2661static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2662					 struct buffer_head *dx_root_bh,
2663					 struct buffer_head *dirent_bh)
2664{
2665	char *de_buf, *limit;
2666	struct ocfs2_dx_root_block *dx_root;
2667	struct ocfs2_dir_entry *de;
2668	struct ocfs2_dx_hinfo hinfo;
2669	u64 dirent_blk = dirent_bh->b_blocknr;
2670
2671	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2672
2673	de_buf = dirent_bh->b_data;
2674	limit = de_buf + dir->i_sb->s_blocksize;
2675
2676	while (de_buf < limit) {
2677		de = (struct ocfs2_dir_entry *)de_buf;
2678
2679		if (!de->name_len || !de->inode)
2680			goto inc;
2681
2682		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2683
2684		trace_ocfs2_dx_dir_index_root_block(
2685				(unsigned long long)dir->i_ino,
2686				hinfo.major_hash, hinfo.minor_hash,
2687				de->name_len, de->name,
2688				le16_to_cpu(dx_root->dr_entries.de_num_used));
2689
2690		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2691					   dirent_blk);
2692
2693		le32_add_cpu(&dx_root->dr_num_entries, 1);
2694inc:
2695		de_buf += le16_to_cpu(de->rec_len);
2696	}
2697}
2698
2699/*
2700 * Count the number of inline directory entries in di_bh and compare
2701 * them against the number of entries we can hold in an inline dx root
2702 * block.
2703 */
2704static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2705					 struct buffer_head *di_bh)
2706{
2707	int dirent_count = 0;
2708	char *de_buf, *limit;
2709	struct ocfs2_dir_entry *de;
2710	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2711
2712	de_buf = di->id2.i_data.id_data;
2713	limit = de_buf + i_size_read(dir);
2714
2715	while (de_buf < limit) {
2716		de = (struct ocfs2_dir_entry *)de_buf;
2717
2718		if (de->name_len && de->inode)
2719			dirent_count++;
2720
2721		de_buf += le16_to_cpu(de->rec_len);
2722	}
2723
2724	/* We are careful to leave room for one extra record. */
2725	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2726}
2727
2728/*
2729 * Expand rec_len of the rightmost dirent in a directory block so that it
2730 * contains the end of our valid space for dirents. We do this during
2731 * expansion from an inline directory to one with extents. The first dir block
2732 * in that case is taken from the inline data portion of the inode block.
2733 *
2734 * This will also return the largest amount of contiguous space for a dirent
2735 * in the block. That value is *not* necessarily the last dirent, even after
2736 * expansion. The directory indexing code wants this value for free space
2737 * accounting. We do this here since we're already walking the entire dir
2738 * block.
2739 *
2740 * We add the dir trailer if this filesystem wants it.
2741 */
2742static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2743					     struct inode *dir)
2744{
2745	struct super_block *sb = dir->i_sb;
2746	struct ocfs2_dir_entry *de;
2747	struct ocfs2_dir_entry *prev_de;
2748	char *de_buf, *limit;
2749	unsigned int new_size = sb->s_blocksize;
2750	unsigned int bytes, this_hole;
2751	unsigned int largest_hole = 0;
2752
2753	if (ocfs2_new_dir_wants_trailer(dir))
2754		new_size = ocfs2_dir_trailer_blk_off(sb);
2755
2756	bytes = new_size - old_size;
2757
2758	limit = start + old_size;
2759	de_buf = start;
2760	de = (struct ocfs2_dir_entry *)de_buf;
2761	do {
2762		this_hole = ocfs2_figure_dirent_hole(de);
2763		if (this_hole > largest_hole)
2764			largest_hole = this_hole;
2765
2766		prev_de = de;
2767		de_buf += le16_to_cpu(de->rec_len);
2768		de = (struct ocfs2_dir_entry *)de_buf;
2769	} while (de_buf < limit);
2770
2771	le16_add_cpu(&prev_de->rec_len, bytes);
2772
2773	/* We need to double check this after modification of the final
2774	 * dirent. */
2775	this_hole = ocfs2_figure_dirent_hole(prev_de);
2776	if (this_hole > largest_hole)
2777		largest_hole = this_hole;
2778
2779	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2780		return largest_hole;
2781	return 0;
2782}
2783
2784/*
2785 * We allocate enough clusters to fulfill "blocks_wanted", but set
2786 * i_size to exactly one block. Ocfs2_extend_dir() will handle the
2787 * rest automatically for us.
2788 *
2789 * *first_block_bh is a pointer to the 1st data block allocated to the
2790 *  directory.
2791 */
2792static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2793				   unsigned int blocks_wanted,
2794				   struct ocfs2_dir_lookup_result *lookup,
2795				   struct buffer_head **first_block_bh)
2796{
2797	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2798	struct super_block *sb = dir->i_sb;
2799	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2800		credits = ocfs2_inline_to_extents_credits(sb);
2801	u64 dx_insert_blkno, blkno,
2802		bytes = blocks_wanted << sb->s_blocksize_bits;
2803	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2804	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2805	struct ocfs2_alloc_context *data_ac = NULL;
2806	struct ocfs2_alloc_context *meta_ac = NULL;
2807	struct buffer_head *dirdata_bh = NULL;
2808	struct buffer_head *dx_root_bh = NULL;
2809	struct buffer_head **dx_leaves = NULL;
2810	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2811	handle_t *handle;
2812	struct ocfs2_extent_tree et;
2813	struct ocfs2_extent_tree dx_et;
2814	int did_quota = 0, bytes_allocated = 0;
2815
2816	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2817
2818	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2819	dx_alloc = 0;
2820
2821	down_write(&oi->ip_alloc_sem);
2822
2823	if (ocfs2_supports_indexed_dirs(osb)) {
2824		credits += ocfs2_add_dir_index_credits(sb);
2825
2826		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2827		if (!dx_inline) {
2828			/* Add one more cluster for an index leaf */
2829			dx_alloc++;
2830			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2831								&num_dx_leaves);
2832			if (!dx_leaves) {
2833				ret = -ENOMEM;
2834				mlog_errno(ret);
2835				goto out;
2836			}
2837		}
2838
2839		/* This gets us the dx_root */
2840		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2841		if (ret) {
2842			mlog_errno(ret);
2843			goto out;
2844		}
2845	}
2846
2847	/*
2848	 * We should never need more than 2 clusters for the unindexed
2849	 * tree - maximum dirent size is far less than one block. In
2850	 * fact, the only time we'd need more than one cluster is if
2851	 * blocksize == clustersize and the dirent won't fit in the
2852	 * extra space that the expansion to a single block gives. As
2853	 * of today, that only happens on 4k/4k file systems.
2854	 */
2855	BUG_ON(alloc > 2);
2856
2857	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2858	if (ret) {
2859		mlog_errno(ret);
2860		goto out;
2861	}
2862
2863	/*
2864	 * Prepare for worst case allocation scenario of two separate
2865	 * extents in the unindexed tree.
2866	 */
2867	if (alloc == 2)
2868		credits += OCFS2_SUBALLOC_ALLOC;
2869
2870	handle = ocfs2_start_trans(osb, credits);
2871	if (IS_ERR(handle)) {
2872		ret = PTR_ERR(handle);
2873		mlog_errno(ret);
2874		goto out;
2875	}
2876
2877	ret = dquot_alloc_space_nodirty(dir,
2878		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2879	if (ret)
2880		goto out_commit;
2881	did_quota = 1;
2882
2883	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2884		/*
2885		 * Allocate our index cluster first, to maximize the
2886		 * possibility that unindexed leaves grow
2887		 * contiguously.
2888		 */
2889		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2890						 dx_leaves, num_dx_leaves,
2891						 &dx_insert_blkno);
2892		if (ret) {
2893			mlog_errno(ret);
2894			goto out_commit;
2895		}
2896		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2897	}
2898
2899	/*
2900	 * Try to claim as many clusters as the bitmap can give though
2901	 * if we only get one now, that's enough to continue. The rest
2902	 * will be claimed after the conversion to extents.
2903	 */
2904	if (ocfs2_dir_resv_allowed(osb))
2905		data_ac->ac_resv = &oi->ip_la_data_resv;
2906	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2907	if (ret) {
2908		mlog_errno(ret);
2909		goto out_commit;
2910	}
2911	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2912
2913	/*
2914	 * Operations are carefully ordered so that we set up the new
2915	 * data block first. The conversion from inline data to
2916	 * extents follows.
2917	 */
2918	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2919	dirdata_bh = sb_getblk(sb, blkno);
2920	if (!dirdata_bh) {
2921		ret = -ENOMEM;
2922		mlog_errno(ret);
2923		goto out_commit;
2924	}
2925
2926	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2927
2928	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2929				      OCFS2_JOURNAL_ACCESS_CREATE);
2930	if (ret) {
2931		mlog_errno(ret);
2932		goto out_commit;
2933	}
2934
2935	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2936	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2937	       sb->s_blocksize - i_size_read(dir));
2938	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2939	if (ocfs2_new_dir_wants_trailer(dir)) {
2940		/*
2941		 * Prepare the dir trailer up front. It will otherwise look
2942		 * like a valid dirent. Even if inserting the index fails
2943		 * (unlikely), then all we'll have done is given first dir
2944		 * block a small amount of fragmentation.
2945		 */
2946		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2947	}
2948
2949	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2950	ocfs2_journal_dirty(handle, dirdata_bh);
2951
2952	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2953		/*
2954		 * Dx dirs with an external cluster need to do this up
2955		 * front. Inline dx root's get handled later, after
2956		 * we've allocated our root block. We get passed back
2957		 * a total number of items so that dr_num_entries can
2958		 * be correctly set once the dx_root has been
2959		 * allocated.
2960		 */
2961		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2962					       num_dx_leaves, &num_dx_entries,
2963					       dirdata_bh);
2964		if (ret) {
2965			mlog_errno(ret);
2966			goto out_commit;
2967		}
2968	}
2969
2970	/*
2971	 * Set extent, i_size, etc on the directory. After this, the
2972	 * inode should contain the same exact dirents as before and
2973	 * be fully accessible from system calls.
2974	 *
2975	 * We let the later dirent insert modify c/mtime - to the user
2976	 * the data hasn't changed.
2977	 */
2978	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2979				      OCFS2_JOURNAL_ACCESS_CREATE);
2980	if (ret) {
2981		mlog_errno(ret);
2982		goto out_commit;
2983	}
2984
2985	spin_lock(&oi->ip_lock);
2986	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2987	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2988	spin_unlock(&oi->ip_lock);
2989
2990	ocfs2_dinode_new_extent_list(dir, di);
2991
2992	i_size_write(dir, sb->s_blocksize);
2993	dir->i_mtime = dir->i_ctime = current_time(dir);
2994
2995	di->i_size = cpu_to_le64(sb->s_blocksize);
2996	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
2997	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
2998	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2999
3000	/*
3001	 * This should never fail as our extent list is empty and all
3002	 * related blocks have been journaled already.
3003	 */
3004	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
3005				  0, NULL);
3006	if (ret) {
3007		mlog_errno(ret);
3008		goto out_commit;
3009	}
3010
3011	/*
3012	 * Set i_blocks after the extent insert for the most up to
3013	 * date ip_clusters value.
3014	 */
3015	dir->i_blocks = ocfs2_inode_sector_count(dir);
3016
3017	ocfs2_journal_dirty(handle, di_bh);
3018
3019	if (ocfs2_supports_indexed_dirs(osb)) {
3020		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
3021						dirdata_bh, meta_ac, dx_inline,
3022						num_dx_entries, &dx_root_bh);
3023		if (ret) {
3024			mlog_errno(ret);
3025			goto out_commit;
3026		}
3027
3028		if (dx_inline) {
3029			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3030						      dirdata_bh);
3031		} else {
3032			ocfs2_init_dx_root_extent_tree(&dx_et,
3033						       INODE_CACHE(dir),
3034						       dx_root_bh);
3035			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3036						  dx_insert_blkno, 1, 0, NULL);
3037			if (ret)
3038				mlog_errno(ret);
3039		}
3040	}
3041
3042	/*
3043	 * We asked for two clusters, but only got one in the 1st
3044	 * pass. Claim the 2nd cluster as a separate extent.
3045	 */
3046	if (alloc > len) {
3047		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3048					   &len);
3049		if (ret) {
3050			mlog_errno(ret);
3051			goto out_commit;
3052		}
3053		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3054
3055		ret = ocfs2_insert_extent(handle, &et, 1,
3056					  blkno, len, 0, NULL);
3057		if (ret) {
3058			mlog_errno(ret);
3059			goto out_commit;
3060		}
3061		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3062	}
3063
3064	*first_block_bh = dirdata_bh;
3065	dirdata_bh = NULL;
3066	if (ocfs2_supports_indexed_dirs(osb)) {
3067		unsigned int off;
3068
3069		if (!dx_inline) {
3070			/*
3071			 * We need to return the correct block within the
3072			 * cluster which should hold our entry.
3073			 */
3074			off = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb),
3075						    &lookup->dl_hinfo);
3076			get_bh(dx_leaves[off]);
3077			lookup->dl_dx_leaf_bh = dx_leaves[off];
3078		}
3079		lookup->dl_dx_root_bh = dx_root_bh;
3080		dx_root_bh = NULL;
3081	}
3082
3083out_commit:
3084	if (ret < 0 && did_quota)
3085		dquot_free_space_nodirty(dir, bytes_allocated);
3086
3087	ocfs2_commit_trans(osb, handle);
3088
3089out:
3090	up_write(&oi->ip_alloc_sem);
3091	if (data_ac)
3092		ocfs2_free_alloc_context(data_ac);
3093	if (meta_ac)
3094		ocfs2_free_alloc_context(meta_ac);
3095
3096	if (dx_leaves) {
3097		for (i = 0; i < num_dx_leaves; i++)
3098			brelse(dx_leaves[i]);
3099		kfree(dx_leaves);
3100	}
3101
3102	brelse(dirdata_bh);
3103	brelse(dx_root_bh);
3104
3105	return ret;
3106}
3107
3108/* returns a bh of the 1st new block in the allocation. */
3109static int ocfs2_do_extend_dir(struct super_block *sb,
3110			       handle_t *handle,
3111			       struct inode *dir,
3112			       struct buffer_head *parent_fe_bh,
3113			       struct ocfs2_alloc_context *data_ac,
3114			       struct ocfs2_alloc_context *meta_ac,
3115			       struct buffer_head **new_bh)
3116{
3117	int status;
3118	int extend, did_quota = 0;
3119	u64 p_blkno, v_blkno;
3120
3121	spin_lock(&OCFS2_I(dir)->ip_lock);
3122	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3123	spin_unlock(&OCFS2_I(dir)->ip_lock);
3124
3125	if (extend) {
3126		u32 offset = OCFS2_I(dir)->ip_clusters;
3127
3128		status = dquot_alloc_space_nodirty(dir,
3129					ocfs2_clusters_to_bytes(sb, 1));
3130		if (status)
3131			goto bail;
3132		did_quota = 1;
3133
3134		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3135					      1, 0, parent_fe_bh, handle,
3136					      data_ac, meta_ac, NULL);
3137		BUG_ON(status == -EAGAIN);
3138		if (status < 0) {
3139			mlog_errno(status);
3140			goto bail;
3141		}
3142	}
3143
3144	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3145	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3146	if (status < 0) {
3147		mlog_errno(status);
3148		goto bail;
3149	}
3150
3151	*new_bh = sb_getblk(sb, p_blkno);
3152	if (!*new_bh) {
3153		status = -ENOMEM;
3154		mlog_errno(status);
3155		goto bail;
3156	}
3157	status = 0;
3158bail:
3159	if (did_quota && status < 0)
3160		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3161	return status;
3162}
3163
3164/*
3165 * Assumes you already have a cluster lock on the directory.
3166 *
3167 * 'blocks_wanted' is only used if we have an inline directory which
3168 * is to be turned into an extent based one. The size of the dirent to
3169 * insert might be larger than the space gained by growing to just one
3170 * block, so we may have to grow the inode by two blocks in that case.
3171 *
3172 * If the directory is already indexed, dx_root_bh must be provided.
3173 */
3174static int ocfs2_extend_dir(struct ocfs2_super *osb,
3175			    struct inode *dir,
3176			    struct buffer_head *parent_fe_bh,
3177			    unsigned int blocks_wanted,
3178			    struct ocfs2_dir_lookup_result *lookup,
3179			    struct buffer_head **new_de_bh)
3180{
3181	int status = 0;
3182	int credits, num_free_extents, drop_alloc_sem = 0;
3183	loff_t dir_i_size;
3184	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3185	struct ocfs2_extent_list *el = &fe->id2.i_list;
3186	struct ocfs2_alloc_context *data_ac = NULL;
3187	struct ocfs2_alloc_context *meta_ac = NULL;
3188	handle_t *handle = NULL;
3189	struct buffer_head *new_bh = NULL;
3190	struct ocfs2_dir_entry * de;
3191	struct super_block *sb = osb->sb;
3192	struct ocfs2_extent_tree et;
3193	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3194
3195	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3196		/*
3197		 * This would be a code error as an inline directory should
3198		 * never have an index root.
3199		 */
3200		BUG_ON(dx_root_bh);
3201
3202		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3203						 blocks_wanted, lookup,
3204						 &new_bh);
3205		if (status) {
3206			mlog_errno(status);
3207			goto bail;
3208		}
3209
3210		/* Expansion from inline to an indexed directory will
3211		 * have given us this. */
3212		dx_root_bh = lookup->dl_dx_root_bh;
3213
3214		if (blocks_wanted == 1) {
3215			/*
3216			 * If the new dirent will fit inside the space
3217			 * created by pushing out to one block, then
3218			 * we can complete the operation
3219			 * here. Otherwise we have to expand i_size
3220			 * and format the 2nd block below.
3221			 */
3222			BUG_ON(new_bh == NULL);
3223			goto bail_bh;
3224		}
3225
3226		/*
3227		 * Get rid of 'new_bh' - we want to format the 2nd
3228		 * data block and return that instead.
3229		 */
3230		brelse(new_bh);
3231		new_bh = NULL;
3232
3233		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3234		drop_alloc_sem = 1;
3235		dir_i_size = i_size_read(dir);
3236		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3237		goto do_extend;
3238	}
3239
3240	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3241	drop_alloc_sem = 1;
3242	dir_i_size = i_size_read(dir);
3243	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3244			       dir_i_size);
3245
3246	/* dir->i_size is always block aligned. */
3247	spin_lock(&OCFS2_I(dir)->ip_lock);
3248	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3249		spin_unlock(&OCFS2_I(dir)->ip_lock);
3250		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3251					      parent_fe_bh);
3252		num_free_extents = ocfs2_num_free_extents(osb, &et);
3253		if (num_free_extents < 0) {
3254			status = num_free_extents;
3255			mlog_errno(status);
3256			goto bail;
3257		}
3258
3259		if (!num_free_extents) {
3260			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3261			if (status < 0) {
3262				if (status != -ENOSPC)
3263					mlog_errno(status);
3264				goto bail;
3265			}
3266		}
3267
3268		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3269		if (status < 0) {
3270			if (status != -ENOSPC)
3271				mlog_errno(status);
3272			goto bail;
3273		}
3274
3275		if (ocfs2_dir_resv_allowed(osb))
3276			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3277
3278		credits = ocfs2_calc_extend_credits(sb, el);
3279	} else {
3280		spin_unlock(&OCFS2_I(dir)->ip_lock);
3281		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3282	}
3283
3284do_extend:
3285	if (ocfs2_dir_indexed(dir))
3286		credits++; /* For attaching the new dirent block to the
3287			    * dx_root */
3288
3289	handle = ocfs2_start_trans(osb, credits);
3290	if (IS_ERR(handle)) {
3291		status = PTR_ERR(handle);
3292		handle = NULL;
3293		mlog_errno(status);
3294		goto bail;
3295	}
3296
3297	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3298				     data_ac, meta_ac, &new_bh);
3299	if (status < 0) {
3300		mlog_errno(status);
3301		goto bail;
3302	}
3303
3304	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3305
3306	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3307					 OCFS2_JOURNAL_ACCESS_CREATE);
3308	if (status < 0) {
3309		mlog_errno(status);
3310		goto bail;
3311	}
3312	memset(new_bh->b_data, 0, sb->s_blocksize);
3313
3314	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3315	de->inode = 0;
3316	if (ocfs2_supports_dir_trailer(dir)) {
3317		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3318
3319		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3320
3321		if (ocfs2_dir_indexed(dir)) {
3322			status = ocfs2_dx_dir_link_trailer(dir, handle,
3323							   dx_root_bh, new_bh);
3324			if (status) {
3325				mlog_errno(status);
3326				goto bail;
3327			}
3328		}
3329	} else {
3330		de->rec_len = cpu_to_le16(sb->s_blocksize);
3331	}
3332	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3333	ocfs2_journal_dirty(handle, new_bh);
3334
3335	dir_i_size += dir->i_sb->s_blocksize;
3336	i_size_write(dir, dir_i_size);
3337	dir->i_blocks = ocfs2_inode_sector_count(dir);
3338	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3339	if (status < 0) {
3340		mlog_errno(status);
3341		goto bail;
3342	}
3343
3344bail_bh:
3345	*new_de_bh = new_bh;
3346	get_bh(*new_de_bh);
3347bail:
3348	if (handle)
3349		ocfs2_commit_trans(osb, handle);
3350	if (drop_alloc_sem)
3351		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3352
3353	if (data_ac)
3354		ocfs2_free_alloc_context(data_ac);
3355	if (meta_ac)
3356		ocfs2_free_alloc_context(meta_ac);
3357
3358	brelse(new_bh);
3359
3360	return status;
3361}
3362
3363static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3364				   const char *name, int namelen,
3365				   struct buffer_head **ret_de_bh,
3366				   unsigned int *blocks_wanted)
3367{
3368	int ret;
3369	struct super_block *sb = dir->i_sb;
3370	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3371	struct ocfs2_dir_entry *de, *last_de = NULL;
3372	char *de_buf, *limit;
3373	unsigned long offset = 0;
3374	unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize;
3375
3376	/*
3377	 * This calculates how many free bytes we'd have in block zero, should
3378	 * this function force expansion to an extent tree.
3379	 */
3380	if (ocfs2_new_dir_wants_trailer(dir))
3381		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3382	else
3383		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3384
3385	de_buf = di->id2.i_data.id_data;
 
3386	limit = de_buf + i_size_read(dir);
3387	rec_len = OCFS2_DIR_REC_LEN(namelen);
3388
3389	while (de_buf < limit) {
3390		de = (struct ocfs2_dir_entry *)de_buf;
3391
3392		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
 
3393			ret = -ENOENT;
3394			goto out;
3395		}
3396		if (ocfs2_match(namelen, name, de)) {
3397			ret = -EEXIST;
3398			goto out;
3399		}
3400		/*
3401		 * No need to check for a trailing dirent record here as
3402		 * they're not used for inline dirs.
3403		 */
3404
3405		if (ocfs2_dirent_would_fit(de, rec_len)) {
3406			/* Ok, we found a spot. Return this bh and let
3407			 * the caller actually fill it in. */
3408			*ret_de_bh = di_bh;
3409			get_bh(*ret_de_bh);
3410			ret = 0;
3411			goto out;
3412		}
3413
3414		last_de = de;
3415		de_buf += le16_to_cpu(de->rec_len);
3416		offset += le16_to_cpu(de->rec_len);
3417	}
3418
3419	/*
3420	 * We're going to require expansion of the directory - figure
3421	 * out how many blocks we'll need so that a place for the
3422	 * dirent can be found.
3423	 */
3424	*blocks_wanted = 1;
3425	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
3426	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3427		*blocks_wanted = 2;
3428
3429	ret = -ENOSPC;
3430out:
3431	return ret;
3432}
3433
3434static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3435				   int namelen, struct buffer_head **ret_de_bh)
3436{
3437	unsigned long offset;
3438	struct buffer_head *bh = NULL;
3439	unsigned short rec_len;
3440	struct ocfs2_dir_entry *de;
3441	struct super_block *sb = dir->i_sb;
3442	int status;
3443	int blocksize = dir->i_sb->s_blocksize;
3444
3445	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3446	if (status)
3447		goto bail;
3448
3449	rec_len = OCFS2_DIR_REC_LEN(namelen);
3450	offset = 0;
3451	de = (struct ocfs2_dir_entry *) bh->b_data;
3452	while (1) {
3453		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3454			brelse(bh);
3455			bh = NULL;
3456
3457			if (i_size_read(dir) <= offset) {
3458				/*
3459				 * Caller will have to expand this
3460				 * directory.
3461				 */
3462				status = -ENOSPC;
3463				goto bail;
3464			}
3465			status = ocfs2_read_dir_block(dir,
3466					     offset >> sb->s_blocksize_bits,
3467					     &bh, 0);
3468			if (status)
3469				goto bail;
3470
3471			/* move to next block */
3472			de = (struct ocfs2_dir_entry *) bh->b_data;
3473		}
3474		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
 
3475			status = -ENOENT;
3476			goto bail;
3477		}
3478		if (ocfs2_match(namelen, name, de)) {
3479			status = -EEXIST;
3480			goto bail;
3481		}
3482
3483		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3484					   blocksize))
3485			goto next;
3486
3487		if (ocfs2_dirent_would_fit(de, rec_len)) {
3488			/* Ok, we found a spot. Return this bh and let
3489			 * the caller actually fill it in. */
3490			*ret_de_bh = bh;
3491			get_bh(*ret_de_bh);
3492			status = 0;
3493			goto bail;
3494		}
3495next:
3496		offset += le16_to_cpu(de->rec_len);
3497		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3498	}
3499
3500bail:
3501	brelse(bh);
3502	if (status)
3503		mlog_errno(status);
3504
3505	return status;
3506}
3507
3508static int dx_leaf_sort_cmp(const void *a, const void *b)
3509{
3510	const struct ocfs2_dx_entry *entry1 = a;
3511	const struct ocfs2_dx_entry *entry2 = b;
3512	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3513	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3514	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3515	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3516
3517	if (major_hash1 > major_hash2)
3518		return 1;
3519	if (major_hash1 < major_hash2)
3520		return -1;
3521
3522	/*
3523	 * It is not strictly necessary to sort by minor
3524	 */
3525	if (minor_hash1 > minor_hash2)
3526		return 1;
3527	if (minor_hash1 < minor_hash2)
3528		return -1;
3529	return 0;
3530}
3531
3532static void dx_leaf_sort_swap(void *a, void *b, int size)
3533{
3534	struct ocfs2_dx_entry *entry1 = a;
3535	struct ocfs2_dx_entry *entry2 = b;
3536
3537	BUG_ON(size != sizeof(*entry1));
3538
3539	swap(*entry1, *entry2);
3540}
3541
3542static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3543{
3544	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3545	int i, num = le16_to_cpu(dl_list->de_num_used);
3546
3547	for (i = 0; i < (num - 1); i++) {
3548		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3549		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3550			return 0;
3551	}
3552
3553	return 1;
3554}
3555
3556/*
3557 * Find the optimal value to split this leaf on. This expects the leaf
3558 * entries to be in sorted order.
3559 *
3560 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3561 * the hash we want to insert.
3562 *
3563 * This function is only concerned with the major hash - that which
3564 * determines which cluster an item belongs to.
3565 */
3566static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3567					u32 leaf_cpos, u32 insert_hash,
3568					u32 *split_hash)
3569{
3570	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3571	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3572	int allsame;
3573
3574	/*
3575	 * There's a couple rare, but nasty corner cases we have to
3576	 * check for here. All of them involve a leaf where all value
3577	 * have the same hash, which is what we look for first.
3578	 *
3579	 * Most of the time, all of the above is false, and we simply
3580	 * pick the median value for a split.
3581	 */
3582	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3583	if (allsame) {
3584		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3585
3586		if (val == insert_hash) {
3587			/*
3588			 * No matter where we would choose to split,
3589			 * the new entry would want to occupy the same
3590			 * block as these. Since there's no space left
3591			 * in their existing block, we know there
3592			 * won't be space after the split.
3593			 */
3594			return -ENOSPC;
3595		}
3596
3597		if (val == leaf_cpos) {
3598			/*
3599			 * Because val is the same as leaf_cpos (which
3600			 * is the smallest value this leaf can have),
3601			 * yet is not equal to insert_hash, then we
3602			 * know that insert_hash *must* be larger than
3603			 * val (and leaf_cpos). At least cpos+1 in value.
3604			 *
3605			 * We also know then, that there cannot be an
3606			 * adjacent extent (otherwise we'd be looking
3607			 * at it). Choosing this value gives us a
3608			 * chance to get some contiguousness.
3609			 */
3610			*split_hash = leaf_cpos + 1;
3611			return 0;
3612		}
3613
3614		if (val > insert_hash) {
3615			/*
3616			 * val can not be the same as insert hash, and
3617			 * also must be larger than leaf_cpos. Also,
3618			 * we know that there can't be a leaf between
3619			 * cpos and val, otherwise the entries with
3620			 * hash 'val' would be there.
3621			 */
3622			*split_hash = val;
3623			return 0;
3624		}
3625
3626		*split_hash = insert_hash;
3627		return 0;
3628	}
3629
3630	/*
3631	 * Since the records are sorted and the checks above
3632	 * guaranteed that not all records in this block are the same,
3633	 * we simple travel forward, from the median, and pick the 1st
3634	 * record whose value is larger than leaf_cpos.
3635	 */
3636	for (i = (num_used / 2); i < num_used; i++)
3637		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3638		    leaf_cpos)
3639			break;
3640
3641	BUG_ON(i == num_used); /* Should be impossible */
3642	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3643	return 0;
3644}
3645
3646/*
3647 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3648 * larger than split_hash into new_dx_leaves. We use a temporary
3649 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3650 *
3651 * Since the block offset inside a leaf (cluster) is a constant mask
3652 * of minor_hash, we can optimize - an item at block offset X within
3653 * the original cluster, will be at offset X within the new cluster.
3654 */
3655static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3656				       handle_t *handle,
3657				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3658				       struct buffer_head **orig_dx_leaves,
3659				       struct buffer_head **new_dx_leaves,
3660				       int num_dx_leaves)
3661{
3662	int i, j, num_used;
3663	u32 major_hash;
3664	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3665	struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
3666	struct ocfs2_dx_entry *dx_entry;
3667
3668	tmp_list = &tmp_dx_leaf->dl_list;
3669
3670	for (i = 0; i < num_dx_leaves; i++) {
3671		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3672		orig_list = &orig_dx_leaf->dl_list;
3673		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
3674		new_list = &new_dx_leaf->dl_list;
3675
3676		num_used = le16_to_cpu(orig_list->de_num_used);
3677
3678		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3679		tmp_list->de_num_used = cpu_to_le16(0);
3680		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3681
3682		for (j = 0; j < num_used; j++) {
3683			dx_entry = &orig_list->de_entries[j];
3684			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3685			if (major_hash >= split_hash)
3686				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3687							      dx_entry);
3688			else
3689				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3690							      dx_entry);
3691		}
3692		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3693
3694		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3695		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3696	}
3697}
3698
3699static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3700					  struct ocfs2_dx_root_block *dx_root)
3701{
3702	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3703
3704	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3705	credits += ocfs2_quota_trans_credits(osb->sb);
3706	return credits;
3707}
3708
3709/*
3710 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3711 * half our entries into.
3712 */
3713static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3714				  struct buffer_head *dx_root_bh,
3715				  struct buffer_head *dx_leaf_bh,
3716				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3717				  u64 leaf_blkno)
3718{
3719	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3720	int credits, ret, i, num_used, did_quota = 0;
3721	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3722	u64 orig_leaves_start;
3723	int num_dx_leaves;
3724	struct buffer_head **orig_dx_leaves = NULL;
3725	struct buffer_head **new_dx_leaves = NULL;
3726	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3727	struct ocfs2_extent_tree et;
3728	handle_t *handle = NULL;
3729	struct ocfs2_dx_root_block *dx_root;
3730	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3731
3732	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3733				     (unsigned long long)leaf_blkno,
3734				     insert_hash);
3735
3736	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3737
3738	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3739	/*
3740	 * XXX: This is a rather large limit. We should use a more
3741	 * realistic value.
3742	 */
3743	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3744		return -ENOSPC;
3745
3746	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3747	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3748		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: "
3749		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3750		     (unsigned long long)leaf_blkno, num_used);
3751		ret = -EIO;
3752		goto out;
3753	}
3754
3755	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3756	if (!orig_dx_leaves) {
3757		ret = -ENOMEM;
3758		mlog_errno(ret);
3759		goto out;
3760	}
3761
3762	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3763	if (!new_dx_leaves) {
3764		ret = -ENOMEM;
3765		mlog_errno(ret);
3766		goto out;
3767	}
3768
3769	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3770	if (ret) {
3771		if (ret != -ENOSPC)
3772			mlog_errno(ret);
3773		goto out;
3774	}
3775
3776	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3777	handle = ocfs2_start_trans(osb, credits);
3778	if (IS_ERR(handle)) {
3779		ret = PTR_ERR(handle);
3780		handle = NULL;
3781		mlog_errno(ret);
3782		goto out;
3783	}
3784
3785	ret = dquot_alloc_space_nodirty(dir,
3786				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3787	if (ret)
3788		goto out_commit;
3789	did_quota = 1;
3790
3791	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3792				      OCFS2_JOURNAL_ACCESS_WRITE);
3793	if (ret) {
3794		mlog_errno(ret);
3795		goto out_commit;
3796	}
3797
3798	/*
3799	 * This block is changing anyway, so we can sort it in place.
3800	 */
3801	sort(dx_leaf->dl_list.de_entries, num_used,
3802	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3803	     dx_leaf_sort_swap);
3804
3805	ocfs2_journal_dirty(handle, dx_leaf_bh);
3806
3807	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3808					   &split_hash);
3809	if (ret) {
3810		mlog_errno(ret);
3811		goto  out_commit;
3812	}
3813
3814	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3815
3816	/*
3817	 * We have to carefully order operations here. There are items
3818	 * which want to be in the new cluster before insert, but in
3819	 * order to put those items in the new cluster, we alter the
3820	 * old cluster. A failure to insert gets nasty.
3821	 *
3822	 * So, start by reserving writes to the old
3823	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3824	 * the new cluster for us, before inserting it. The insert
3825	 * won't happen if there's an error before that. Once the
3826	 * insert is done then, we can transfer from one leaf into the
3827	 * other without fear of hitting any error.
3828	 */
3829
3830	/*
3831	 * The leaf transfer wants some scratch space so that we don't
3832	 * wind up doing a bunch of expensive memmove().
3833	 */
3834	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3835	if (!tmp_dx_leaf) {
3836		ret = -ENOMEM;
3837		mlog_errno(ret);
3838		goto out_commit;
3839	}
3840
3841	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3842	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3843				   orig_dx_leaves);
3844	if (ret) {
3845		mlog_errno(ret);
3846		goto out_commit;
3847	}
3848
3849	cpos = split_hash;
3850	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3851				       data_ac, meta_ac, new_dx_leaves,
3852				       num_dx_leaves);
3853	if (ret) {
3854		mlog_errno(ret);
3855		goto out_commit;
3856	}
3857
3858	for (i = 0; i < num_dx_leaves; i++) {
3859		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3860					      orig_dx_leaves[i],
3861					      OCFS2_JOURNAL_ACCESS_WRITE);
3862		if (ret) {
3863			mlog_errno(ret);
3864			goto out_commit;
3865		}
3866
3867		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3868					      new_dx_leaves[i],
3869					      OCFS2_JOURNAL_ACCESS_WRITE);
3870		if (ret) {
3871			mlog_errno(ret);
3872			goto out_commit;
3873		}
3874	}
3875
3876	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3877				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3878
3879out_commit:
3880	if (ret < 0 && did_quota)
3881		dquot_free_space_nodirty(dir,
3882				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3883
3884	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3885	ocfs2_commit_trans(osb, handle);
3886
3887out:
3888	if (orig_dx_leaves || new_dx_leaves) {
3889		for (i = 0; i < num_dx_leaves; i++) {
3890			if (orig_dx_leaves)
3891				brelse(orig_dx_leaves[i]);
3892			if (new_dx_leaves)
3893				brelse(new_dx_leaves[i]);
3894		}
3895		kfree(orig_dx_leaves);
3896		kfree(new_dx_leaves);
3897	}
3898
3899	if (meta_ac)
3900		ocfs2_free_alloc_context(meta_ac);
3901	if (data_ac)
3902		ocfs2_free_alloc_context(data_ac);
3903
3904	kfree(tmp_dx_leaf);
3905	return ret;
3906}
3907
3908static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3909				   struct buffer_head *di_bh,
3910				   struct buffer_head *dx_root_bh,
3911				   const char *name, int namelen,
3912				   struct ocfs2_dir_lookup_result *lookup)
3913{
3914	int ret, rebalanced = 0;
3915	struct ocfs2_dx_root_block *dx_root;
3916	struct buffer_head *dx_leaf_bh = NULL;
3917	struct ocfs2_dx_leaf *dx_leaf;
3918	u64 blkno;
3919	u32 leaf_cpos;
3920
3921	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3922
3923restart_search:
3924	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3925				  &leaf_cpos, &blkno);
3926	if (ret) {
3927		mlog_errno(ret);
3928		goto out;
3929	}
3930
3931	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3932	if (ret) {
3933		mlog_errno(ret);
3934		goto out;
3935	}
3936
3937	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3938
3939	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3940	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3941		if (rebalanced) {
3942			/*
3943			 * Rebalancing should have provided us with
3944			 * space in an appropriate leaf.
3945			 *
3946			 * XXX: Is this an abnormal condition then?
3947			 * Should we print a message here?
3948			 */
3949			ret = -ENOSPC;
3950			goto out;
3951		}
3952
3953		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3954					     &lookup->dl_hinfo, leaf_cpos,
3955					     blkno);
3956		if (ret) {
3957			if (ret != -ENOSPC)
3958				mlog_errno(ret);
3959			goto out;
3960		}
3961
3962		/*
3963		 * Restart the lookup. The rebalance might have
3964		 * changed which block our item fits into. Mark our
3965		 * progress, so we only execute this once.
3966		 */
3967		brelse(dx_leaf_bh);
3968		dx_leaf_bh = NULL;
3969		rebalanced = 1;
3970		goto restart_search;
3971	}
3972
3973	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3974	dx_leaf_bh = NULL;
3975
3976out:
3977	brelse(dx_leaf_bh);
3978	return ret;
3979}
3980
3981static int ocfs2_search_dx_free_list(struct inode *dir,
3982				     struct buffer_head *dx_root_bh,
3983				     int namelen,
3984				     struct ocfs2_dir_lookup_result *lookup)
3985{
3986	int ret = -ENOSPC;
3987	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3988	struct ocfs2_dir_block_trailer *db;
3989	u64 next_block;
3990	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3991	struct ocfs2_dx_root_block *dx_root;
3992
3993	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3994	next_block = le64_to_cpu(dx_root->dr_free_blk);
3995
3996	while (next_block) {
3997		brelse(prev_leaf_bh);
3998		prev_leaf_bh = leaf_bh;
3999		leaf_bh = NULL;
4000
4001		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
4002		if (ret) {
4003			mlog_errno(ret);
4004			goto out;
4005		}
4006
4007		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
4008		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
4009			lookup->dl_leaf_bh = leaf_bh;
4010			lookup->dl_prev_leaf_bh = prev_leaf_bh;
4011			leaf_bh = NULL;
4012			prev_leaf_bh = NULL;
4013			break;
4014		}
4015
4016		next_block = le64_to_cpu(db->db_free_next);
4017	}
4018
4019	if (!next_block)
4020		ret = -ENOSPC;
4021
4022out:
4023
4024	brelse(leaf_bh);
4025	brelse(prev_leaf_bh);
4026	return ret;
4027}
4028
4029static int ocfs2_expand_inline_dx_root(struct inode *dir,
4030				       struct buffer_head *dx_root_bh)
4031{
4032	int ret, num_dx_leaves, i, j, did_quota = 0;
4033	struct buffer_head **dx_leaves = NULL;
4034	struct ocfs2_extent_tree et;
4035	u64 insert_blkno;
4036	struct ocfs2_alloc_context *data_ac = NULL;
4037	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4038	handle_t *handle = NULL;
4039	struct ocfs2_dx_root_block *dx_root;
4040	struct ocfs2_dx_entry_list *entry_list;
4041	struct ocfs2_dx_entry *dx_entry;
4042	struct ocfs2_dx_leaf *target_leaf;
4043
4044	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4045	if (ret) {
4046		mlog_errno(ret);
4047		goto out;
4048	}
4049
4050	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4051	if (!dx_leaves) {
4052		ret = -ENOMEM;
4053		mlog_errno(ret);
4054		goto out;
4055	}
4056
4057	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4058	if (IS_ERR(handle)) {
4059		ret = PTR_ERR(handle);
4060		mlog_errno(ret);
4061		goto out;
4062	}
4063
4064	ret = dquot_alloc_space_nodirty(dir,
4065				       ocfs2_clusters_to_bytes(osb->sb, 1));
4066	if (ret)
4067		goto out_commit;
4068	did_quota = 1;
4069
4070	/*
4071	 * We do this up front, before the allocation, so that a
4072	 * failure to add the dx_root_bh to the journal won't result
4073	 * us losing clusters.
4074	 */
4075	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4076				      OCFS2_JOURNAL_ACCESS_WRITE);
4077	if (ret) {
4078		mlog_errno(ret);
4079		goto out_commit;
4080	}
4081
4082	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4083					 num_dx_leaves, &insert_blkno);
4084	if (ret) {
4085		mlog_errno(ret);
4086		goto out_commit;
4087	}
4088
4089	/*
4090	 * Transfer the entries from our dx_root into the appropriate
4091	 * block
4092	 */
4093	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4094	entry_list = &dx_root->dr_entries;
4095
4096	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4097		dx_entry = &entry_list->de_entries[i];
4098
4099		j = __ocfs2_dx_dir_hash_idx(osb,
4100					    le32_to_cpu(dx_entry->dx_minor_hash));
4101		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4102
4103		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4104
4105		/* Each leaf has been passed to the journal already
4106		 * via __ocfs2_dx_dir_new_cluster() */
4107	}
4108
4109	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4110	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4111	       offsetof(struct ocfs2_dx_root_block, dr_list));
4112	dx_root->dr_list.l_count =
4113		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4114
4115	/* This should never fail considering we start with an empty
4116	 * dx_root. */
4117	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4118	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4119	if (ret)
4120		mlog_errno(ret);
4121	did_quota = 0;
4122
4123	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4124	ocfs2_journal_dirty(handle, dx_root_bh);
4125
4126out_commit:
4127	if (ret < 0 && did_quota)
4128		dquot_free_space_nodirty(dir,
4129					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4130
4131	ocfs2_commit_trans(osb, handle);
4132
4133out:
4134	if (data_ac)
4135		ocfs2_free_alloc_context(data_ac);
4136
4137	if (dx_leaves) {
4138		for (i = 0; i < num_dx_leaves; i++)
4139			brelse(dx_leaves[i]);
4140		kfree(dx_leaves);
4141	}
4142	return ret;
4143}
4144
4145static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4146{
4147	struct ocfs2_dx_root_block *dx_root;
4148	struct ocfs2_dx_entry_list *entry_list;
4149
4150	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4151	entry_list = &dx_root->dr_entries;
4152
4153	if (le16_to_cpu(entry_list->de_num_used) >=
4154	    le16_to_cpu(entry_list->de_count))
4155		return -ENOSPC;
4156
4157	return 0;
4158}
4159
4160static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4161					   struct buffer_head *di_bh,
4162					   const char *name,
4163					   int namelen,
4164					   struct ocfs2_dir_lookup_result *lookup)
4165{
4166	int ret, free_dx_root = 1;
4167	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4168	struct buffer_head *dx_root_bh = NULL;
4169	struct buffer_head *leaf_bh = NULL;
4170	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4171	struct ocfs2_dx_root_block *dx_root;
4172
4173	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4174	if (ret) {
4175		mlog_errno(ret);
4176		goto out;
4177	}
4178
4179	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4180	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4181		ret = -ENOSPC;
4182		mlog_errno(ret);
4183		goto out;
4184	}
4185
4186	if (ocfs2_dx_root_inline(dx_root)) {
4187		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4188
4189		if (ret == 0)
4190			goto search_el;
4191
4192		/*
4193		 * We ran out of room in the root block. Expand it to
4194		 * an extent, then allow ocfs2_find_dir_space_dx to do
4195		 * the rest.
4196		 */
4197		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4198		if (ret) {
4199			mlog_errno(ret);
4200			goto out;
4201		}
4202	}
4203
4204	/*
4205	 * Insert preparation for an indexed directory is split into two
4206	 * steps. The call to find_dir_space_dx reserves room in the index for
4207	 * an additional item. If we run out of space there, it's a real error
4208	 * we can't continue on.
4209	 */
4210	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4211				      namelen, lookup);
4212	if (ret) {
4213		mlog_errno(ret);
4214		goto out;
4215	}
4216
4217search_el:
4218	/*
4219	 * Next, we need to find space in the unindexed tree. This call
4220	 * searches using the free space linked list. If the unindexed tree
4221	 * lacks sufficient space, we'll expand it below. The expansion code
4222	 * is smart enough to add any new blocks to the free space list.
4223	 */
4224	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4225	if (ret && ret != -ENOSPC) {
4226		mlog_errno(ret);
4227		goto out;
4228	}
4229
4230	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4231	lookup->dl_dx_root_bh = dx_root_bh;
4232	free_dx_root = 0;
4233
4234	if (ret == -ENOSPC) {
4235		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4236
4237		if (ret) {
4238			mlog_errno(ret);
4239			goto out;
4240		}
4241
4242		/*
4243		 * We make the assumption here that new leaf blocks are added
4244		 * to the front of our free list.
4245		 */
4246		lookup->dl_prev_leaf_bh = NULL;
4247		lookup->dl_leaf_bh = leaf_bh;
4248	}
4249
4250out:
4251	if (free_dx_root)
4252		brelse(dx_root_bh);
4253	return ret;
4254}
4255
4256/*
4257 * Get a directory ready for insert. Any directory allocation required
4258 * happens here. Success returns zero, and enough context in the dir
4259 * lookup result that ocfs2_add_entry() will be able complete the task
4260 * with minimal performance impact.
4261 */
4262int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4263				 struct inode *dir,
4264				 struct buffer_head *parent_fe_bh,
4265				 const char *name,
4266				 int namelen,
4267				 struct ocfs2_dir_lookup_result *lookup)
4268{
4269	int ret;
4270	unsigned int blocks_wanted = 1;
4271	struct buffer_head *bh = NULL;
4272
4273	trace_ocfs2_prepare_dir_for_insert(
4274		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4275
4276	if (!namelen) {
4277		ret = -EINVAL;
4278		mlog_errno(ret);
4279		goto out;
4280	}
4281
4282	/*
4283	 * Do this up front to reduce confusion.
4284	 *
4285	 * The directory might start inline, then be turned into an
4286	 * indexed one, in which case we'd need to hash deep inside
4287	 * ocfs2_find_dir_space_id(). Since
4288	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4289	 * done, there seems no point in spreading out the calls. We
4290	 * can optimize away the case where the file system doesn't
4291	 * support indexing.
4292	 */
4293	if (ocfs2_supports_indexed_dirs(osb))
4294		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4295
4296	if (ocfs2_dir_indexed(dir)) {
4297		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4298						      name, namelen, lookup);
4299		if (ret)
4300			mlog_errno(ret);
4301		goto out;
4302	}
4303
4304	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4305		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4306					      namelen, &bh, &blocks_wanted);
4307	} else
4308		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4309
4310	if (ret && ret != -ENOSPC) {
4311		mlog_errno(ret);
4312		goto out;
4313	}
4314
4315	if (ret == -ENOSPC) {
4316		/*
4317		 * We have to expand the directory to add this name.
4318		 */
4319		BUG_ON(bh);
4320
4321		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4322				       lookup, &bh);
4323		if (ret) {
4324			if (ret != -ENOSPC)
4325				mlog_errno(ret);
4326			goto out;
4327		}
4328
4329		BUG_ON(!bh);
4330	}
4331
4332	lookup->dl_leaf_bh = bh;
4333	bh = NULL;
4334out:
4335	brelse(bh);
4336	return ret;
4337}
4338
4339static int ocfs2_dx_dir_remove_index(struct inode *dir,
4340				     struct buffer_head *di_bh,
4341				     struct buffer_head *dx_root_bh)
4342{
4343	int ret;
4344	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4345	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4346	struct ocfs2_dx_root_block *dx_root;
4347	struct inode *dx_alloc_inode = NULL;
4348	struct buffer_head *dx_alloc_bh = NULL;
4349	handle_t *handle;
4350	u64 blk;
4351	u16 bit;
4352	u64 bg_blkno;
4353
4354	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4355
4356	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4357					EXTENT_ALLOC_SYSTEM_INODE,
4358					le16_to_cpu(dx_root->dr_suballoc_slot));
4359	if (!dx_alloc_inode) {
4360		ret = -ENOMEM;
4361		mlog_errno(ret);
4362		goto out;
4363	}
4364	inode_lock(dx_alloc_inode);
4365
4366	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4367	if (ret) {
4368		mlog_errno(ret);
4369		goto out_mutex;
4370	}
4371
4372	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4373	if (IS_ERR(handle)) {
4374		ret = PTR_ERR(handle);
4375		mlog_errno(ret);
4376		goto out_unlock;
4377	}
4378
4379	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4380				      OCFS2_JOURNAL_ACCESS_WRITE);
4381	if (ret) {
4382		mlog_errno(ret);
4383		goto out_commit;
4384	}
4385
4386	spin_lock(&OCFS2_I(dir)->ip_lock);
4387	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4388	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4389	spin_unlock(&OCFS2_I(dir)->ip_lock);
4390	di->i_dx_root = cpu_to_le64(0ULL);
4391	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4392
4393	ocfs2_journal_dirty(handle, di_bh);
4394
4395	blk = le64_to_cpu(dx_root->dr_blkno);
4396	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4397	if (dx_root->dr_suballoc_loc)
4398		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4399	else
4400		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4401	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4402				       bit, bg_blkno, 1);
4403	if (ret)
4404		mlog_errno(ret);
4405
4406out_commit:
4407	ocfs2_commit_trans(osb, handle);
4408
4409out_unlock:
4410	ocfs2_inode_unlock(dx_alloc_inode, 1);
4411
4412out_mutex:
4413	inode_unlock(dx_alloc_inode);
4414	brelse(dx_alloc_bh);
4415out:
4416	iput(dx_alloc_inode);
4417	return ret;
4418}
4419
4420int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4421{
4422	int ret;
4423	unsigned int uninitialized_var(clen);
4424	u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos);
4425	u64 uninitialized_var(blkno);
4426	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4427	struct buffer_head *dx_root_bh = NULL;
4428	struct ocfs2_dx_root_block *dx_root;
4429	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4430	struct ocfs2_cached_dealloc_ctxt dealloc;
4431	struct ocfs2_extent_tree et;
4432
4433	ocfs2_init_dealloc_ctxt(&dealloc);
4434
4435	if (!ocfs2_dir_indexed(dir))
4436		return 0;
4437
4438	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4439	if (ret) {
4440		mlog_errno(ret);
4441		goto out;
4442	}
4443	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4444
4445	if (ocfs2_dx_root_inline(dx_root))
4446		goto remove_index;
4447
4448	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4449
4450	/* XXX: What if dr_clusters is too large? */
4451	while (le32_to_cpu(dx_root->dr_clusters)) {
4452		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4453					      major_hash, &cpos, &blkno, &clen);
4454		if (ret) {
4455			mlog_errno(ret);
4456			goto out;
4457		}
4458
4459		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4460
4461		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4462					       &dealloc, 0, false);
4463		if (ret) {
4464			mlog_errno(ret);
4465			goto out;
4466		}
4467
4468		if (cpos == 0)
4469			break;
4470
4471		major_hash = cpos - 1;
4472	}
4473
4474remove_index:
4475	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4476	if (ret) {
4477		mlog_errno(ret);
4478		goto out;
4479	}
4480
4481	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4482out:
4483	ocfs2_schedule_truncate_log_flush(osb, 1);
4484	ocfs2_run_deallocs(osb, &dealloc);
4485
4486	brelse(dx_root_bh);
4487	return ret;
4488}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
 
   3 * dir.c
   4 *
   5 * Creates, reads, walks and deletes directory-nodes
   6 *
   7 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   8 *
   9 *  Portions of this code from linux/fs/ext3/dir.c
  10 *
  11 *  Copyright (C) 1992, 1993, 1994, 1995
  12 *  Remy Card (card@masi.ibp.fr)
  13 *  Laboratoire MASI - Institut Blaise pascal
  14 *  Universite Pierre et Marie Curie (Paris VI)
  15 *
  16 *   from
  17 *
  18 *   linux/fs/minix/dir.c
  19 *
  20 *   Copyright (C) 1991, 1992 Linus Torvalds
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  21 */
  22
  23#include <linux/fs.h>
  24#include <linux/types.h>
  25#include <linux/slab.h>
  26#include <linux/highmem.h>
  27#include <linux/quotaops.h>
  28#include <linux/sort.h>
  29#include <linux/iversion.h>
  30
  31#include <cluster/masklog.h>
  32
  33#include "ocfs2.h"
  34
  35#include "alloc.h"
  36#include "blockcheck.h"
  37#include "dir.h"
  38#include "dlmglue.h"
  39#include "extent_map.h"
  40#include "file.h"
  41#include "inode.h"
  42#include "journal.h"
  43#include "namei.h"
  44#include "suballoc.h"
  45#include "super.h"
  46#include "sysfile.h"
  47#include "uptodate.h"
  48#include "ocfs2_trace.h"
  49
  50#include "buffer_head_io.h"
  51
  52#define NAMEI_RA_CHUNKS  2
  53#define NAMEI_RA_BLOCKS  4
  54#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  55
 
 
 
 
  56static int ocfs2_do_extend_dir(struct super_block *sb,
  57			       handle_t *handle,
  58			       struct inode *dir,
  59			       struct buffer_head *parent_fe_bh,
  60			       struct ocfs2_alloc_context *data_ac,
  61			       struct ocfs2_alloc_context *meta_ac,
  62			       struct buffer_head **new_bh);
  63static int ocfs2_dir_indexed(struct inode *inode);
  64
  65/*
  66 * These are distinct checks because future versions of the file system will
  67 * want to have a trailing dirent structure independent of indexing.
  68 */
  69static int ocfs2_supports_dir_trailer(struct inode *dir)
  70{
  71	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  72
  73	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  74		return 0;
  75
  76	return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir);
  77}
  78
  79/*
  80 * "new' here refers to the point at which we're creating a new
  81 * directory via "mkdir()", but also when we're expanding an inline
  82 * directory. In either case, we don't yet have the indexing bit set
  83 * on the directory, so the standard checks will fail in when metaecc
  84 * is turned off. Only directory-initialization type functions should
  85 * use this then. Everything else wants ocfs2_supports_dir_trailer()
  86 */
  87static int ocfs2_new_dir_wants_trailer(struct inode *dir)
  88{
  89	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  90
  91	return ocfs2_meta_ecc(osb) ||
  92		ocfs2_supports_indexed_dirs(osb);
  93}
  94
  95static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb)
  96{
  97	return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer);
  98}
  99
 100#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
 101
 102/* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make
 103 * them more consistent? */
 104struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize,
 105							    void *data)
 106{
 107	char *p = data;
 108
 109	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
 110	return (struct ocfs2_dir_block_trailer *)p;
 111}
 112
 113/*
 114 * XXX: This is executed once on every dirent. We should consider optimizing
 115 * it.
 116 */
 117static int ocfs2_skip_dir_trailer(struct inode *dir,
 118				  struct ocfs2_dir_entry *de,
 119				  unsigned long offset,
 120				  unsigned long blklen)
 121{
 122	unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer);
 123
 124	if (!ocfs2_supports_dir_trailer(dir))
 125		return 0;
 126
 127	if (offset != toff)
 128		return 0;
 129
 130	return 1;
 131}
 132
 133static void ocfs2_init_dir_trailer(struct inode *inode,
 134				   struct buffer_head *bh, u16 rec_len)
 135{
 136	struct ocfs2_dir_block_trailer *trailer;
 137
 138	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
 139	strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
 140	trailer->db_compat_rec_len =
 141			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
 142	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
 143	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
 144	trailer->db_free_rec_len = cpu_to_le16(rec_len);
 145}
 146/*
 147 * Link an unindexed block with a dir trailer structure into the index free
 148 * list. This function will modify dirdata_bh, but assumes you've already
 149 * passed it to the journal.
 150 */
 151static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
 152				     struct buffer_head *dx_root_bh,
 153				     struct buffer_head *dirdata_bh)
 154{
 155	int ret;
 156	struct ocfs2_dx_root_block *dx_root;
 157	struct ocfs2_dir_block_trailer *trailer;
 158
 159	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
 160				      OCFS2_JOURNAL_ACCESS_WRITE);
 161	if (ret) {
 162		mlog_errno(ret);
 163		goto out;
 164	}
 165	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
 166	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 167
 168	trailer->db_free_next = dx_root->dr_free_blk;
 169	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
 170
 171	ocfs2_journal_dirty(handle, dx_root_bh);
 172
 173out:
 174	return ret;
 175}
 176
 177static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res)
 178{
 179	return res->dl_prev_leaf_bh == NULL;
 180}
 181
 182void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res)
 183{
 184	brelse(res->dl_dx_root_bh);
 185	brelse(res->dl_leaf_bh);
 186	brelse(res->dl_dx_leaf_bh);
 187	brelse(res->dl_prev_leaf_bh);
 188}
 189
 190static int ocfs2_dir_indexed(struct inode *inode)
 191{
 192	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL)
 193		return 1;
 194	return 0;
 195}
 196
 197static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root)
 198{
 199	return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE;
 200}
 201
 202/*
 203 * Hashing code adapted from ext3
 204 */
 205#define DELTA 0x9E3779B9
 206
 207static void TEA_transform(__u32 buf[4], __u32 const in[])
 208{
 209	__u32	sum = 0;
 210	__u32	b0 = buf[0], b1 = buf[1];
 211	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
 212	int	n = 16;
 213
 214	do {
 215		sum += DELTA;
 216		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
 217		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
 218	} while (--n);
 219
 220	buf[0] += b0;
 221	buf[1] += b1;
 222}
 223
 224static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 225{
 226	__u32	pad, val;
 227	int	i;
 228
 229	pad = (__u32)len | ((__u32)len << 8);
 230	pad |= pad << 16;
 231
 232	val = pad;
 233	if (len > num*4)
 234		len = num * 4;
 235	for (i = 0; i < len; i++) {
 236		if ((i % 4) == 0)
 237			val = pad;
 238		val = msg[i] + (val << 8);
 239		if ((i % 4) == 3) {
 240			*buf++ = val;
 241			val = pad;
 242			num--;
 243		}
 244	}
 245	if (--num >= 0)
 246		*buf++ = val;
 247	while (--num >= 0)
 248		*buf++ = pad;
 249}
 250
 251static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
 252				   struct ocfs2_dx_hinfo *hinfo)
 253{
 254	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 255	const char	*p;
 256	__u32		in[8], buf[4];
 257
 258	/*
 259	 * XXX: Is this really necessary, if the index is never looked
 260	 * at by readdir? Is a hash value of '0' a bad idea?
 261	 */
 262	if ((len == 1 && !strncmp(".", name, 1)) ||
 263	    (len == 2 && !strncmp("..", name, 2))) {
 264		buf[0] = buf[1] = 0;
 265		goto out;
 266	}
 267
 268#ifdef OCFS2_DEBUG_DX_DIRS
 269	/*
 270	 * This makes it very easy to debug indexing problems. We
 271	 * should never allow this to be selected without hand editing
 272	 * this file though.
 273	 */
 274	buf[0] = buf[1] = len;
 275	goto out;
 276#endif
 277
 278	memcpy(buf, osb->osb_dx_seed, sizeof(buf));
 279
 280	p = name;
 281	while (len > 0) {
 282		str2hashbuf(p, len, in, 4);
 283		TEA_transform(buf, in);
 284		len -= 16;
 285		p += 16;
 286	}
 287
 288out:
 289	hinfo->major_hash = buf[0];
 290	hinfo->minor_hash = buf[1];
 291}
 292
 293/*
 294 * bh passed here can be an inode block or a dir data block, depending
 295 * on the inode inline data flag.
 296 */
 297static int ocfs2_check_dir_entry(struct inode *dir,
 298				 struct ocfs2_dir_entry *de,
 299				 struct buffer_head *bh,
 300				 char *buf,
 301				 unsigned int size,
 302				 unsigned long offset)
 303{
 304	const char *error_msg = NULL;
 305	const int rlen = le16_to_cpu(de->rec_len);
 306	const unsigned long next_offset = ((char *) de - buf) + rlen;
 307
 308	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
 309		error_msg = "rec_len is smaller than minimal";
 310	else if (unlikely(rlen % 4 != 0))
 311		error_msg = "rec_len % 4 != 0";
 312	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
 313		error_msg = "rec_len is too small for name_len";
 314	else if (unlikely(next_offset > size))
 315		error_msg = "directory entry overrun";
 316	else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) &&
 317		 next_offset != size)
 318		error_msg = "directory entry too close to end";
 319
 320	if (unlikely(error_msg != NULL))
 321		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
 322		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
 323		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
 324		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
 325		     de->name_len);
 326
 327	return error_msg == NULL ? 1 : 0;
 328}
 329
 330static inline int ocfs2_match(int len,
 331			      const char * const name,
 332			      struct ocfs2_dir_entry *de)
 333{
 334	if (len != de->name_len)
 335		return 0;
 336	if (!de->inode)
 337		return 0;
 338	return !memcmp(name, de->name, len);
 339}
 340
 341/*
 342 * Returns 0 if not found, -1 on failure, and 1 on success
 343 */
 344static inline int ocfs2_search_dirblock(struct buffer_head *bh,
 345					struct inode *dir,
 346					const char *name, int namelen,
 347					unsigned long offset,
 348					char *first_de,
 349					unsigned int bytes,
 350					struct ocfs2_dir_entry **res_dir)
 351{
 352	struct ocfs2_dir_entry *de;
 353	char *dlimit, *de_buf;
 354	int de_len;
 355	int ret = 0;
 356
 357	de_buf = first_de;
 358	dlimit = de_buf + bytes;
 359
 360	while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) {
 361		/* this code is executed quadratically often */
 362		/* do minimal checking `by hand' */
 363
 364		de = (struct ocfs2_dir_entry *) de_buf;
 365
 366		if (de->name + namelen <= dlimit &&
 367		    ocfs2_match(namelen, name, de)) {
 368			/* found a match - just to be sure, do a full check */
 369			if (!ocfs2_check_dir_entry(dir, de, bh, first_de,
 370						   bytes, offset)) {
 371				ret = -1;
 372				goto bail;
 373			}
 374			*res_dir = de;
 375			ret = 1;
 376			goto bail;
 377		}
 378
 379		/* prevent looping on a bad block */
 380		de_len = le16_to_cpu(de->rec_len);
 381		if (de_len <= 0) {
 382			ret = -1;
 383			goto bail;
 384		}
 385
 386		de_buf += de_len;
 387		offset += de_len;
 388	}
 389
 390bail:
 391	trace_ocfs2_search_dirblock(ret);
 392	return ret;
 393}
 394
 395static struct buffer_head *ocfs2_find_entry_id(const char *name,
 396					       int namelen,
 397					       struct inode *dir,
 398					       struct ocfs2_dir_entry **res_dir)
 399{
 400	int ret, found;
 401	struct buffer_head *di_bh = NULL;
 402	struct ocfs2_dinode *di;
 403	struct ocfs2_inline_data *data;
 404
 405	ret = ocfs2_read_inode_block(dir, &di_bh);
 406	if (ret) {
 407		mlog_errno(ret);
 408		goto out;
 409	}
 410
 411	di = (struct ocfs2_dinode *)di_bh->b_data;
 412	data = &di->id2.i_data;
 413
 414	found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
 415				      data->id_data, i_size_read(dir), res_dir);
 416	if (found == 1)
 417		return di_bh;
 418
 419	brelse(di_bh);
 420out:
 421	return NULL;
 422}
 423
 424static int ocfs2_validate_dir_block(struct super_block *sb,
 425				    struct buffer_head *bh)
 426{
 427	int rc;
 428	struct ocfs2_dir_block_trailer *trailer =
 429		ocfs2_trailer_from_bh(bh, sb);
 430
 431
 432	/*
 433	 * We don't validate dirents here, that's handled
 434	 * in-place when the code walks them.
 435	 */
 436	trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
 437
 438	BUG_ON(!buffer_uptodate(bh));
 439
 440	/*
 441	 * If the ecc fails, we return the error but otherwise
 442	 * leave the filesystem running.  We know any error is
 443	 * local to this block.
 444	 *
 445	 * Note that we are safe to call this even if the directory
 446	 * doesn't have a trailer.  Filesystems without metaecc will do
 447	 * nothing, and filesystems with it will have one.
 448	 */
 449	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check);
 450	if (rc)
 451		mlog(ML_ERROR, "Checksum failed for dinode %llu\n",
 452		     (unsigned long long)bh->b_blocknr);
 453
 454	return rc;
 455}
 456
 457/*
 458 * Validate a directory trailer.
 459 *
 460 * We check the trailer here rather than in ocfs2_validate_dir_block()
 461 * because that function doesn't have the inode to test.
 462 */
 463static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh)
 464{
 465	int rc = 0;
 466	struct ocfs2_dir_block_trailer *trailer;
 467
 468	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 469	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
 470		rc = ocfs2_error(dir->i_sb,
 471				 "Invalid dirblock #%llu: signature = %.*s\n",
 472				 (unsigned long long)bh->b_blocknr, 7,
 473				 trailer->db_signature);
 474		goto out;
 475	}
 476	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
 477		rc = ocfs2_error(dir->i_sb,
 478				 "Directory block #%llu has an invalid db_blkno of %llu\n",
 479				 (unsigned long long)bh->b_blocknr,
 480				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 481		goto out;
 482	}
 483	if (le64_to_cpu(trailer->db_parent_dinode) !=
 484	    OCFS2_I(dir)->ip_blkno) {
 485		rc = ocfs2_error(dir->i_sb,
 486				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
 487				 (unsigned long long)bh->b_blocknr,
 488				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
 489				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 490		goto out;
 491	}
 492out:
 493	return rc;
 494}
 495
 496/*
 497 * This function forces all errors to -EIO for consistency with its
 498 * predecessor, ocfs2_bread().  We haven't audited what returning the
 499 * real error codes would do to callers.  We log the real codes with
 500 * mlog_errno() before we squash them.
 501 */
 502static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
 503				struct buffer_head **bh, int flags)
 504{
 505	int rc = 0;
 506	struct buffer_head *tmp = *bh;
 507
 508	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags,
 509				    ocfs2_validate_dir_block);
 510	if (rc) {
 511		mlog_errno(rc);
 512		goto out;
 513	}
 514
 515	if (!(flags & OCFS2_BH_READAHEAD) &&
 516	    ocfs2_supports_dir_trailer(inode)) {
 517		rc = ocfs2_check_dir_trailer(inode, tmp);
 518		if (rc) {
 519			if (!*bh)
 520				brelse(tmp);
 521			mlog_errno(rc);
 522			goto out;
 523		}
 524	}
 525
 526	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
 527	if (!*bh)
 528		*bh = tmp;
 529
 530out:
 531	return rc ? -EIO : 0;
 532}
 533
 534/*
 535 * Read the block at 'phys' which belongs to this directory
 536 * inode. This function does no virtual->physical block translation -
 537 * what's passed in is assumed to be a valid directory block.
 538 */
 539static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
 540				       struct buffer_head **bh)
 541{
 542	int ret;
 543	struct buffer_head *tmp = *bh;
 544
 545	ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
 546			       ocfs2_validate_dir_block);
 547	if (ret) {
 548		mlog_errno(ret);
 549		goto out;
 550	}
 551
 552	if (ocfs2_supports_dir_trailer(dir)) {
 553		ret = ocfs2_check_dir_trailer(dir, tmp);
 554		if (ret) {
 555			if (!*bh)
 556				brelse(tmp);
 557			mlog_errno(ret);
 558			goto out;
 559		}
 560	}
 561
 562	if (!ret && !*bh)
 563		*bh = tmp;
 564out:
 565	return ret;
 566}
 567
 568static int ocfs2_validate_dx_root(struct super_block *sb,
 569				  struct buffer_head *bh)
 570{
 571	int ret;
 572	struct ocfs2_dx_root_block *dx_root;
 573
 574	BUG_ON(!buffer_uptodate(bh));
 575
 576	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;
 577
 578	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check);
 579	if (ret) {
 580		mlog(ML_ERROR,
 581		     "Checksum failed for dir index root block %llu\n",
 582		     (unsigned long long)bh->b_blocknr);
 583		return ret;
 584	}
 585
 586	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
 587		ret = ocfs2_error(sb,
 588				  "Dir Index Root # %llu has bad signature %.*s\n",
 589				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
 590				  7, dx_root->dr_signature);
 591	}
 592
 593	return ret;
 594}
 595
 596static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
 597			      struct buffer_head **dx_root_bh)
 598{
 599	int ret;
 600	u64 blkno = le64_to_cpu(di->i_dx_root);
 601	struct buffer_head *tmp = *dx_root_bh;
 602
 603	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 604			       ocfs2_validate_dx_root);
 605
 606	/* If ocfs2_read_block() got us a new bh, pass it up. */
 607	if (!ret && !*dx_root_bh)
 608		*dx_root_bh = tmp;
 609
 610	return ret;
 611}
 612
 613static int ocfs2_validate_dx_leaf(struct super_block *sb,
 614				  struct buffer_head *bh)
 615{
 616	int ret;
 617	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;
 618
 619	BUG_ON(!buffer_uptodate(bh));
 620
 621	ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check);
 622	if (ret) {
 623		mlog(ML_ERROR,
 624		     "Checksum failed for dir index leaf block %llu\n",
 625		     (unsigned long long)bh->b_blocknr);
 626		return ret;
 627	}
 628
 629	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
 630		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
 631				  7, dx_leaf->dl_signature);
 632	}
 633
 634	return ret;
 635}
 636
 637static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
 638			      struct buffer_head **dx_leaf_bh)
 639{
 640	int ret;
 641	struct buffer_head *tmp = *dx_leaf_bh;
 642
 643	ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
 644			       ocfs2_validate_dx_leaf);
 645
 646	/* If ocfs2_read_block() got us a new bh, pass it up. */
 647	if (!ret && !*dx_leaf_bh)
 648		*dx_leaf_bh = tmp;
 649
 650	return ret;
 651}
 652
 653/*
 654 * Read a series of dx_leaf blocks. This expects all buffer_head
 655 * pointers to be NULL on function entry.
 656 */
 657static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
 658				struct buffer_head **dx_leaf_bhs)
 659{
 660	int ret;
 661
 662	ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
 663				ocfs2_validate_dx_leaf);
 664	if (ret)
 665		mlog_errno(ret);
 666
 667	return ret;
 668}
 669
 670static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
 671					       struct inode *dir,
 672					       struct ocfs2_dir_entry **res_dir)
 673{
 674	struct super_block *sb;
 675	struct buffer_head *bh_use[NAMEI_RA_SIZE];
 676	struct buffer_head *bh, *ret = NULL;
 677	unsigned long start, block, b;
 678	int ra_max = 0;		/* Number of bh's in the readahead
 679				   buffer, bh_use[] */
 680	int ra_ptr = 0;		/* Current index into readahead
 681				   buffer */
 682	int num = 0;
 683	int nblocks, i;
 684
 685	sb = dir->i_sb;
 686
 687	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 688	start = OCFS2_I(dir)->ip_dir_start_lookup;
 689	if (start >= nblocks)
 690		start = 0;
 691	block = start;
 692
 693restart:
 694	do {
 695		/*
 696		 * We deal with the read-ahead logic here.
 697		 */
 698		if (ra_ptr >= ra_max) {
 699			/* Refill the readahead buffer */
 700			ra_ptr = 0;
 701			b = block;
 702			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
 703				/*
 704				 * Terminate if we reach the end of the
 705				 * directory and must wrap, or if our
 706				 * search has finished at this block.
 707				 */
 708				if (b >= nblocks || (num && block == start)) {
 709					bh_use[ra_max] = NULL;
 710					break;
 711				}
 712				num++;
 713
 714				bh = NULL;
 715				ocfs2_read_dir_block(dir, b++, &bh,
 716							   OCFS2_BH_READAHEAD);
 717				bh_use[ra_max] = bh;
 718			}
 719		}
 720		if ((bh = bh_use[ra_ptr++]) == NULL)
 721			goto next;
 722		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
 723			/* read error, skip block & hope for the best.
 724			 * ocfs2_read_dir_block() has released the bh. */
 725			mlog(ML_ERROR, "reading directory %llu, "
 726				    "offset %lu\n",
 727				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
 728				    block);
 729			goto next;
 730		}
 731		i = ocfs2_search_dirblock(bh, dir, name, namelen,
 732					  block << sb->s_blocksize_bits,
 733					  bh->b_data, sb->s_blocksize,
 734					  res_dir);
 735		if (i == 1) {
 736			OCFS2_I(dir)->ip_dir_start_lookup = block;
 737			ret = bh;
 738			goto cleanup_and_exit;
 739		} else {
 740			brelse(bh);
 741			if (i < 0)
 742				goto cleanup_and_exit;
 743		}
 744	next:
 745		if (++block >= nblocks)
 746			block = 0;
 747	} while (block != start);
 748
 749	/*
 750	 * If the directory has grown while we were searching, then
 751	 * search the last part of the directory before giving up.
 752	 */
 753	block = nblocks;
 754	nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
 755	if (block < nblocks) {
 756		start = 0;
 757		goto restart;
 758	}
 759
 760cleanup_and_exit:
 761	/* Clean up the read-ahead blocks */
 762	for (; ra_ptr < ra_max; ra_ptr++)
 763		brelse(bh_use[ra_ptr]);
 764
 765	trace_ocfs2_find_entry_el(ret);
 766	return ret;
 767}
 768
 769static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
 770				   struct ocfs2_extent_list *el,
 771				   u32 major_hash,
 772				   u32 *ret_cpos,
 773				   u64 *ret_phys_blkno,
 774				   unsigned int *ret_clen)
 775{
 776	int ret = 0, i, found;
 777	struct buffer_head *eb_bh = NULL;
 778	struct ocfs2_extent_block *eb;
 779	struct ocfs2_extent_rec *rec = NULL;
 780
 781	if (el->l_tree_depth) {
 782		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
 783				      &eb_bh);
 784		if (ret) {
 785			mlog_errno(ret);
 786			goto out;
 787		}
 788
 789		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
 790		el = &eb->h_list;
 791
 792		if (el->l_tree_depth) {
 793			ret = ocfs2_error(inode->i_sb,
 794					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
 795					  inode->i_ino,
 796					  (unsigned long long)eb_bh->b_blocknr);
 797			goto out;
 798		}
 799	}
 800
 801	found = 0;
 802	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
 803		rec = &el->l_recs[i];
 804
 805		if (le32_to_cpu(rec->e_cpos) <= major_hash) {
 806			found = 1;
 807			break;
 808		}
 809	}
 810
 811	if (!found) {
 812		ret = ocfs2_error(inode->i_sb,
 813				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
 814				  inode->i_ino,
 815				  le32_to_cpu(rec->e_cpos),
 816				  ocfs2_rec_clusters(el, rec));
 817		goto out;
 818	}
 819
 820	if (ret_phys_blkno)
 821		*ret_phys_blkno = le64_to_cpu(rec->e_blkno);
 822	if (ret_cpos)
 823		*ret_cpos = le32_to_cpu(rec->e_cpos);
 824	if (ret_clen)
 825		*ret_clen = le16_to_cpu(rec->e_leaf_clusters);
 826
 827out:
 828	brelse(eb_bh);
 829	return ret;
 830}
 831
 832/*
 833 * Returns the block index, from the start of the cluster which this
 834 * hash belongs too.
 835 */
 836static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 837						   u32 minor_hash)
 838{
 839	return minor_hash & osb->osb_dx_mask;
 840}
 841
 842static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb,
 843					  struct ocfs2_dx_hinfo *hinfo)
 844{
 845	return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash);
 846}
 847
 848static int ocfs2_dx_dir_lookup(struct inode *inode,
 849			       struct ocfs2_extent_list *el,
 850			       struct ocfs2_dx_hinfo *hinfo,
 851			       u32 *ret_cpos,
 852			       u64 *ret_phys_blkno)
 853{
 854	int ret = 0;
 855	unsigned int cend, clen;
 856	u32 cpos;
 857	u64 blkno;
 858	u32 name_hash = hinfo->major_hash;
 859
 860	ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
 861				      &clen);
 862	if (ret) {
 863		mlog_errno(ret);
 864		goto out;
 865	}
 866
 867	cend = cpos + clen;
 868	if (name_hash >= cend) {
 869		/* We want the last cluster */
 870		blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1);
 871		cpos += clen - 1;
 872	} else {
 873		blkno += ocfs2_clusters_to_blocks(inode->i_sb,
 874						  name_hash - cpos);
 875		cpos = name_hash;
 876	}
 877
 878	/*
 879	 * We now have the cluster which should hold our entry. To
 880	 * find the exact block from the start of the cluster to
 881	 * search, we take the lower bits of the hash.
 882	 */
 883	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);
 884
 885	if (ret_phys_blkno)
 886		*ret_phys_blkno = blkno;
 887	if (ret_cpos)
 888		*ret_cpos = cpos;
 889
 890out:
 891
 892	return ret;
 893}
 894
 895static int ocfs2_dx_dir_search(const char *name, int namelen,
 896			       struct inode *dir,
 897			       struct ocfs2_dx_root_block *dx_root,
 898			       struct ocfs2_dir_lookup_result *res)
 899{
 900	int ret, i, found;
 901	u64 phys;
 902	struct buffer_head *dx_leaf_bh = NULL;
 903	struct ocfs2_dx_leaf *dx_leaf;
 904	struct ocfs2_dx_entry *dx_entry = NULL;
 905	struct buffer_head *dir_ent_bh = NULL;
 906	struct ocfs2_dir_entry *dir_ent = NULL;
 907	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
 908	struct ocfs2_extent_list *dr_el;
 909	struct ocfs2_dx_entry_list *entry_list;
 910
 911	ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo);
 912
 913	if (ocfs2_dx_root_inline(dx_root)) {
 914		entry_list = &dx_root->dr_entries;
 915		goto search;
 916	}
 917
 918	dr_el = &dx_root->dr_list;
 919
 920	ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys);
 921	if (ret) {
 922		mlog_errno(ret);
 923		goto out;
 924	}
 925
 926	trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
 927				  namelen, name, hinfo->major_hash,
 928				  hinfo->minor_hash, (unsigned long long)phys);
 929
 930	ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
 931	if (ret) {
 932		mlog_errno(ret);
 933		goto out;
 934	}
 935
 936	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
 937
 938	trace_ocfs2_dx_dir_search_leaf_info(
 939			le16_to_cpu(dx_leaf->dl_list.de_num_used),
 940			le16_to_cpu(dx_leaf->dl_list.de_count));
 941
 942	entry_list = &dx_leaf->dl_list;
 943
 944search:
 945	/*
 946	 * Empty leaf is legal, so no need to check for that.
 947	 */
 948	found = 0;
 949	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
 950		dx_entry = &entry_list->de_entries[i];
 951
 952		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
 953		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
 954			continue;
 955
 956		/*
 957		 * Search unindexed leaf block now. We're not
 958		 * guaranteed to find anything.
 959		 */
 960		ret = ocfs2_read_dir_block_direct(dir,
 961					  le64_to_cpu(dx_entry->dx_dirent_blk),
 962					  &dir_ent_bh);
 963		if (ret) {
 964			mlog_errno(ret);
 965			goto out;
 966		}
 967
 968		/*
 969		 * XXX: We should check the unindexed block here,
 970		 * before using it.
 971		 */
 972
 973		found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen,
 974					      0, dir_ent_bh->b_data,
 975					      dir->i_sb->s_blocksize, &dir_ent);
 976		if (found == 1)
 977			break;
 978
 979		if (found == -1) {
 980			/* This means we found a bad directory entry. */
 981			ret = -EIO;
 982			mlog_errno(ret);
 983			goto out;
 984		}
 985
 986		brelse(dir_ent_bh);
 987		dir_ent_bh = NULL;
 988	}
 989
 990	if (found <= 0) {
 991		ret = -ENOENT;
 992		goto out;
 993	}
 994
 995	res->dl_leaf_bh = dir_ent_bh;
 996	res->dl_entry = dir_ent;
 997	res->dl_dx_leaf_bh = dx_leaf_bh;
 998	res->dl_dx_entry = dx_entry;
 999
1000	ret = 0;
1001out:
1002	if (ret) {
1003		brelse(dx_leaf_bh);
1004		brelse(dir_ent_bh);
1005	}
1006	return ret;
1007}
1008
1009static int ocfs2_find_entry_dx(const char *name, int namelen,
1010			       struct inode *dir,
1011			       struct ocfs2_dir_lookup_result *lookup)
1012{
1013	int ret;
1014	struct buffer_head *di_bh = NULL;
1015	struct ocfs2_dinode *di;
1016	struct buffer_head *dx_root_bh = NULL;
1017	struct ocfs2_dx_root_block *dx_root;
1018
1019	ret = ocfs2_read_inode_block(dir, &di_bh);
1020	if (ret) {
1021		mlog_errno(ret);
1022		goto out;
1023	}
1024
1025	di = (struct ocfs2_dinode *)di_bh->b_data;
1026
1027	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
1028	if (ret) {
1029		mlog_errno(ret);
1030		goto out;
1031	}
1032	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
1033
1034	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup);
1035	if (ret) {
1036		if (ret != -ENOENT)
1037			mlog_errno(ret);
1038		goto out;
1039	}
1040
1041	lookup->dl_dx_root_bh = dx_root_bh;
1042	dx_root_bh = NULL;
1043out:
1044	brelse(di_bh);
1045	brelse(dx_root_bh);
1046	return ret;
1047}
1048
1049/*
1050 * Try to find an entry of the provided name within 'dir'.
1051 *
1052 * If nothing was found, -ENOENT is returned. Otherwise, zero is
1053 * returned and the struct 'res' will contain information useful to
1054 * other directory manipulation functions.
1055 *
1056 * Caller can NOT assume anything about the contents of the
1057 * buffer_heads - they are passed back only so that it can be passed
1058 * into any one of the manipulation functions (add entry, delete
1059 * entry, etc). As an example, bh in the extent directory case is a
1060 * data block, in the inline-data case it actually points to an inode,
1061 * in the indexed directory case, multiple buffers are involved.
1062 */
1063int ocfs2_find_entry(const char *name, int namelen,
1064		     struct inode *dir, struct ocfs2_dir_lookup_result *lookup)
1065{
1066	struct buffer_head *bh;
1067	struct ocfs2_dir_entry *res_dir = NULL;
1068	int ret = 0;
1069
1070	if (ocfs2_dir_indexed(dir))
1071		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
1072
1073	if (unlikely(i_size_read(dir) <= 0)) {
1074		ret = -EFSCORRUPTED;
1075		mlog_errno(ret);
1076		goto out;
1077	}
1078	/*
1079	 * The unindexed dir code only uses part of the lookup
1080	 * structure, so there's no reason to push it down further
1081	 * than this.
1082	 */
1083	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1084		if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
1085			ret = -EFSCORRUPTED;
1086			mlog_errno(ret);
1087			goto out;
1088		}
1089		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
1090	} else {
1091		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
1092	}
1093
1094	if (bh == NULL)
1095		return -ENOENT;
1096
1097	lookup->dl_leaf_bh = bh;
1098	lookup->dl_entry = res_dir;
1099out:
1100	return ret;
1101}
1102
1103/*
1104 * Update inode number and type of a previously found directory entry.
1105 */
1106int ocfs2_update_entry(struct inode *dir, handle_t *handle,
1107		       struct ocfs2_dir_lookup_result *res,
1108		       struct inode *new_entry_inode)
1109{
1110	int ret;
1111	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1112	struct ocfs2_dir_entry *de = res->dl_entry;
1113	struct buffer_head *de_bh = res->dl_leaf_bh;
1114
1115	/*
1116	 * The same code works fine for both inline-data and extent
1117	 * based directories, so no need to split this up.  The only
1118	 * difference is the journal_access function.
1119	 */
1120
1121	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1122		access = ocfs2_journal_access_di;
1123
1124	ret = access(handle, INODE_CACHE(dir), de_bh,
1125		     OCFS2_JOURNAL_ACCESS_WRITE);
1126	if (ret) {
1127		mlog_errno(ret);
1128		goto out;
1129	}
1130
1131	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
1132	ocfs2_set_de_type(de, new_entry_inode->i_mode);
1133
1134	ocfs2_journal_dirty(handle, de_bh);
1135
1136out:
1137	return ret;
1138}
1139
1140/*
1141 * __ocfs2_delete_entry deletes a directory entry by merging it with the
1142 * previous entry
1143 */
1144static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1145				struct ocfs2_dir_entry *de_del,
1146				struct buffer_head *bh, char *first_de,
1147				unsigned int bytes)
1148{
1149	struct ocfs2_dir_entry *de, *pde;
1150	int i, status = -ENOENT;
1151	ocfs2_journal_access_func access = ocfs2_journal_access_db;
1152
1153	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1154		access = ocfs2_journal_access_di;
1155
1156	i = 0;
1157	pde = NULL;
1158	de = (struct ocfs2_dir_entry *) first_de;
1159	while (i < bytes) {
1160		if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) {
1161			status = -EIO;
1162			mlog_errno(status);
1163			goto bail;
1164		}
1165		if (de == de_del)  {
1166			status = access(handle, INODE_CACHE(dir), bh,
1167					OCFS2_JOURNAL_ACCESS_WRITE);
1168			if (status < 0) {
1169				status = -EIO;
1170				mlog_errno(status);
1171				goto bail;
1172			}
1173			if (pde)
1174				le16_add_cpu(&pde->rec_len,
1175						le16_to_cpu(de->rec_len));
1176			de->inode = 0;
1177			inode_inc_iversion(dir);
1178			ocfs2_journal_dirty(handle, bh);
1179			goto bail;
1180		}
1181		i += le16_to_cpu(de->rec_len);
1182		pde = de;
1183		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1184	}
1185bail:
1186	return status;
1187}
1188
1189static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de)
1190{
1191	unsigned int hole;
1192
1193	if (le64_to_cpu(de->inode) == 0)
1194		hole = le16_to_cpu(de->rec_len);
1195	else
1196		hole = le16_to_cpu(de->rec_len) -
1197			OCFS2_DIR_REC_LEN(de->name_len);
1198
1199	return hole;
1200}
1201
1202static int ocfs2_find_max_rec_len(struct super_block *sb,
1203				  struct buffer_head *dirblock_bh)
1204{
1205	int size, this_hole, largest_hole = 0;
1206	char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data;
1207	struct ocfs2_dir_entry *de;
1208
1209	trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb);
1210	size = ocfs2_dir_trailer_blk_off(sb);
1211	limit = start + size;
1212	de_buf = start;
1213	de = (struct ocfs2_dir_entry *)de_buf;
1214	do {
1215		if (de_buf != trailer) {
1216			this_hole = ocfs2_figure_dirent_hole(de);
1217			if (this_hole > largest_hole)
1218				largest_hole = this_hole;
1219		}
1220
1221		de_buf += le16_to_cpu(de->rec_len);
1222		de = (struct ocfs2_dir_entry *)de_buf;
1223	} while (de_buf < limit);
1224
1225	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
1226		return largest_hole;
1227	return 0;
1228}
1229
1230static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list,
1231				       int index)
1232{
1233	int num_used = le16_to_cpu(entry_list->de_num_used);
1234
1235	if (num_used == 1 || index == (num_used - 1))
1236		goto clear;
1237
1238	memmove(&entry_list->de_entries[index],
1239		&entry_list->de_entries[index + 1],
1240		(num_used - index - 1)*sizeof(struct ocfs2_dx_entry));
1241clear:
1242	num_used--;
1243	memset(&entry_list->de_entries[num_used], 0,
1244	       sizeof(struct ocfs2_dx_entry));
1245	entry_list->de_num_used = cpu_to_le16(num_used);
1246}
1247
1248static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1249				 struct ocfs2_dir_lookup_result *lookup)
1250{
1251	int ret, index, max_rec_len, add_to_free_list = 0;
1252	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1253	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
1254	struct ocfs2_dx_leaf *dx_leaf;
1255	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
1256	struct ocfs2_dir_block_trailer *trailer;
1257	struct ocfs2_dx_root_block *dx_root;
1258	struct ocfs2_dx_entry_list *entry_list;
1259
1260	/*
1261	 * This function gets a bit messy because we might have to
1262	 * modify the root block, regardless of whether the indexed
1263	 * entries are stored inline.
1264	 */
1265
1266	/*
1267	 * *Only* set 'entry_list' here, based on where we're looking
1268	 * for the indexed entries. Later, we might still want to
1269	 * journal both blocks, based on free list state.
1270	 */
1271	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
1272	if (ocfs2_dx_root_inline(dx_root)) {
1273		entry_list = &dx_root->dr_entries;
1274	} else {
1275		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
1276		entry_list = &dx_leaf->dl_list;
1277	}
1278
1279	/* Neither of these are a disk corruption - that should have
1280	 * been caught by lookup, before we got here. */
1281	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
1282	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);
1283
1284	index = (char *)dx_entry - (char *)entry_list->de_entries;
1285	index /= sizeof(*dx_entry);
1286
1287	if (index >= le16_to_cpu(entry_list->de_num_used)) {
1288		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n",
1289		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
1290		     entry_list, dx_entry);
1291		return -EIO;
1292	}
1293
1294	/*
1295	 * We know that removal of this dirent will leave enough room
1296	 * for a new one, so add this block to the free list if it
1297	 * isn't already there.
1298	 */
1299	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
1300	if (trailer->db_free_rec_len == 0)
1301		add_to_free_list = 1;
1302
1303	/*
1304	 * Add the block holding our index into the journal before
1305	 * removing the unindexed entry. If we get an error return
1306	 * from __ocfs2_delete_entry(), then it hasn't removed the
1307	 * entry yet. Likewise, successful return means we *must*
1308	 * remove the indexed entry.
1309	 *
1310	 * We're also careful to journal the root tree block here as
1311	 * the entry count needs to be updated. Also, we might be
1312	 * adding to the start of the free list.
1313	 */
1314	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1315				      OCFS2_JOURNAL_ACCESS_WRITE);
1316	if (ret) {
1317		mlog_errno(ret);
1318		goto out;
1319	}
1320
1321	if (!ocfs2_dx_root_inline(dx_root)) {
1322		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
1323					      lookup->dl_dx_leaf_bh,
1324					      OCFS2_JOURNAL_ACCESS_WRITE);
1325		if (ret) {
1326			mlog_errno(ret);
1327			goto out;
1328		}
1329	}
1330
1331	trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1332				    index);
1333
1334	ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1335				   leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
1336	if (ret) {
1337		mlog_errno(ret);
1338		goto out;
1339	}
1340
1341	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh);
1342	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1343	if (add_to_free_list) {
1344		trailer->db_free_next = dx_root->dr_free_blk;
1345		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
1346		ocfs2_journal_dirty(handle, dx_root_bh);
1347	}
1348
1349	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
1350	ocfs2_journal_dirty(handle, leaf_bh);
1351
1352	le32_add_cpu(&dx_root->dr_num_entries, -1);
1353	ocfs2_journal_dirty(handle, dx_root_bh);
1354
1355	ocfs2_dx_list_remove_entry(entry_list, index);
1356
1357	if (!ocfs2_dx_root_inline(dx_root))
1358		ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh);
1359
1360out:
1361	return ret;
1362}
1363
1364static inline int ocfs2_delete_entry_id(handle_t *handle,
1365					struct inode *dir,
1366					struct ocfs2_dir_entry *de_del,
1367					struct buffer_head *bh)
1368{
1369	int ret;
1370	struct buffer_head *di_bh = NULL;
1371	struct ocfs2_dinode *di;
1372	struct ocfs2_inline_data *data;
1373
1374	ret = ocfs2_read_inode_block(dir, &di_bh);
1375	if (ret) {
1376		mlog_errno(ret);
1377		goto out;
1378	}
1379
1380	di = (struct ocfs2_dinode *)di_bh->b_data;
1381	data = &di->id2.i_data;
1382
1383	ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
1384				   i_size_read(dir));
1385
1386	brelse(di_bh);
1387out:
1388	return ret;
1389}
1390
1391static inline int ocfs2_delete_entry_el(handle_t *handle,
1392					struct inode *dir,
1393					struct ocfs2_dir_entry *de_del,
1394					struct buffer_head *bh)
1395{
1396	return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
1397				    bh->b_size);
1398}
1399
1400/*
1401 * Delete a directory entry. Hide the details of directory
1402 * implementation from the caller.
1403 */
1404int ocfs2_delete_entry(handle_t *handle,
1405		       struct inode *dir,
1406		       struct ocfs2_dir_lookup_result *res)
1407{
1408	if (ocfs2_dir_indexed(dir))
1409		return ocfs2_delete_entry_dx(handle, dir, res);
1410
1411	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1412		return ocfs2_delete_entry_id(handle, dir, res->dl_entry,
1413					     res->dl_leaf_bh);
1414
1415	return ocfs2_delete_entry_el(handle, dir, res->dl_entry,
1416				     res->dl_leaf_bh);
1417}
1418
1419/*
1420 * Check whether 'de' has enough room to hold an entry of
1421 * 'new_rec_len' bytes.
1422 */
1423static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
1424					 unsigned int new_rec_len)
1425{
1426	unsigned int de_really_used;
1427
1428	/* Check whether this is an empty record with enough space */
1429	if (le64_to_cpu(de->inode) == 0 &&
1430	    le16_to_cpu(de->rec_len) >= new_rec_len)
1431		return 1;
1432
1433	/*
1434	 * Record might have free space at the end which we can
1435	 * use.
1436	 */
1437	de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
1438	if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
1439	    return 1;
1440
1441	return 0;
1442}
1443
1444static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf,
1445					  struct ocfs2_dx_entry *dx_new_entry)
1446{
1447	int i;
1448
1449	i = le16_to_cpu(dx_leaf->dl_list.de_num_used);
1450	dx_leaf->dl_list.de_entries[i] = *dx_new_entry;
1451
1452	le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1);
1453}
1454
1455static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list,
1456				       struct ocfs2_dx_hinfo *hinfo,
1457				       u64 dirent_blk)
1458{
1459	int i;
1460	struct ocfs2_dx_entry *dx_entry;
1461
1462	i = le16_to_cpu(entry_list->de_num_used);
1463	dx_entry = &entry_list->de_entries[i];
1464
1465	memset(dx_entry, 0, sizeof(*dx_entry));
1466	dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash);
1467	dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash);
1468	dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk);
1469
1470	le16_add_cpu(&entry_list->de_num_used, 1);
1471}
1472
1473static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
1474				      struct ocfs2_dx_hinfo *hinfo,
1475				      u64 dirent_blk,
1476				      struct buffer_head *dx_leaf_bh)
1477{
1478	int ret;
1479	struct ocfs2_dx_leaf *dx_leaf;
1480
1481	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
1482				      OCFS2_JOURNAL_ACCESS_WRITE);
1483	if (ret) {
1484		mlog_errno(ret);
1485		goto out;
1486	}
1487
1488	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
1489	ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk);
1490	ocfs2_journal_dirty(handle, dx_leaf_bh);
1491
1492out:
1493	return ret;
1494}
1495
1496static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle,
1497					struct ocfs2_dx_hinfo *hinfo,
1498					u64 dirent_blk,
1499					struct ocfs2_dx_root_block *dx_root)
1500{
1501	ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk);
1502}
1503
1504static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
1505			       struct ocfs2_dir_lookup_result *lookup)
1506{
1507	int ret = 0;
1508	struct ocfs2_dx_root_block *dx_root;
1509	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
1510
1511	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
1512				      OCFS2_JOURNAL_ACCESS_WRITE);
1513	if (ret) {
1514		mlog_errno(ret);
1515		goto out;
1516	}
1517
1518	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
1519	if (ocfs2_dx_root_inline(dx_root)) {
1520		ocfs2_dx_inline_root_insert(dir, handle,
1521					    &lookup->dl_hinfo,
1522					    lookup->dl_leaf_bh->b_blocknr,
1523					    dx_root);
1524	} else {
1525		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo,
1526						 lookup->dl_leaf_bh->b_blocknr,
1527						 lookup->dl_dx_leaf_bh);
1528		if (ret)
1529			goto out;
1530	}
1531
1532	le32_add_cpu(&dx_root->dr_num_entries, 1);
1533	ocfs2_journal_dirty(handle, dx_root_bh);
1534
1535out:
1536	return ret;
1537}
1538
1539static void ocfs2_remove_block_from_free_list(struct inode *dir,
1540				       handle_t *handle,
1541				       struct ocfs2_dir_lookup_result *lookup)
1542{
1543	struct ocfs2_dir_block_trailer *trailer, *prev;
1544	struct ocfs2_dx_root_block *dx_root;
1545	struct buffer_head *bh;
1546
1547	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1548
1549	if (ocfs2_free_list_at_root(lookup)) {
1550		bh = lookup->dl_dx_root_bh;
1551		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
1552		dx_root->dr_free_blk = trailer->db_free_next;
1553	} else {
1554		bh = lookup->dl_prev_leaf_bh;
1555		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
1556		prev->db_free_next = trailer->db_free_next;
1557	}
1558
1559	trailer->db_free_rec_len = cpu_to_le16(0);
1560	trailer->db_free_next = cpu_to_le64(0);
1561
1562	ocfs2_journal_dirty(handle, bh);
1563	ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1564}
1565
1566/*
1567 * This expects that a journal write has been reserved on
1568 * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh
1569 */
1570static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle,
1571				   struct ocfs2_dir_lookup_result *lookup)
1572{
1573	int max_rec_len;
1574	struct ocfs2_dir_block_trailer *trailer;
1575
1576	/* Walk dl_leaf_bh to figure out what the new free rec_len is. */
1577	max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh);
1578	if (max_rec_len) {
1579		/*
1580		 * There's still room in this block, so no need to remove it
1581		 * from the free list. In this case, we just want to update
1582		 * the rec len accounting.
1583		 */
1584		trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);
1585		trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
1586		ocfs2_journal_dirty(handle, lookup->dl_leaf_bh);
1587	} else {
1588		ocfs2_remove_block_from_free_list(dir, handle, lookup);
1589	}
1590}
1591
1592/* we don't always have a dentry for what we want to add, so people
1593 * like orphan dir can call this instead.
1594 *
1595 * The lookup context must have been filled from
1596 * ocfs2_prepare_dir_for_insert.
1597 */
1598int __ocfs2_add_entry(handle_t *handle,
1599		      struct inode *dir,
1600		      const char *name, int namelen,
1601		      struct inode *inode, u64 blkno,
1602		      struct buffer_head *parent_fe_bh,
1603		      struct ocfs2_dir_lookup_result *lookup)
1604{
1605	unsigned long offset;
1606	unsigned short rec_len;
1607	struct ocfs2_dir_entry *de, *de1;
1608	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
1609	struct super_block *sb = dir->i_sb;
1610	int retval;
1611	unsigned int size = sb->s_blocksize;
1612	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1613	char *data_start = insert_bh->b_data;
1614
 
 
 
1615	if (ocfs2_dir_indexed(dir)) {
1616		struct buffer_head *bh;
1617
1618		/*
1619		 * An indexed dir may require that we update the free space
1620		 * list. Reserve a write to the previous node in the list so
1621		 * that we don't fail later.
1622		 *
1623		 * XXX: This can be either a dx_root_block, or an unindexed
1624		 * directory tree leaf block.
1625		 */
1626		if (ocfs2_free_list_at_root(lookup)) {
1627			bh = lookup->dl_dx_root_bh;
1628			retval = ocfs2_journal_access_dr(handle,
1629						 INODE_CACHE(dir), bh,
1630						 OCFS2_JOURNAL_ACCESS_WRITE);
1631		} else {
1632			bh = lookup->dl_prev_leaf_bh;
1633			retval = ocfs2_journal_access_db(handle,
1634						 INODE_CACHE(dir), bh,
1635						 OCFS2_JOURNAL_ACCESS_WRITE);
1636		}
1637		if (retval) {
1638			mlog_errno(retval);
1639			return retval;
1640		}
1641	} else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1642		data_start = di->id2.i_data.id_data;
1643		size = i_size_read(dir);
1644
1645		BUG_ON(insert_bh != parent_fe_bh);
1646	}
1647
1648	rec_len = OCFS2_DIR_REC_LEN(namelen);
1649	offset = 0;
1650	de = (struct ocfs2_dir_entry *) data_start;
1651	while (1) {
1652		BUG_ON((char *)de >= (size + data_start));
1653
1654		/* These checks should've already been passed by the
1655		 * prepare function, but I guess we can leave them
1656		 * here anyway. */
1657		if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start,
1658					   size, offset)) {
1659			retval = -ENOENT;
1660			goto bail;
1661		}
1662		if (ocfs2_match(namelen, name, de)) {
1663			retval = -EEXIST;
1664			goto bail;
1665		}
1666
1667		/* We're guaranteed that we should have space, so we
1668		 * can't possibly have hit the trailer...right? */
1669		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
1670				"Hit dir trailer trying to insert %.*s "
1671			        "(namelen %d) into directory %llu.  "
1672				"offset is %lu, trailer offset is %d\n",
1673				namelen, name, namelen,
1674				(unsigned long long)parent_fe_bh->b_blocknr,
1675				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
1676
1677		if (ocfs2_dirent_would_fit(de, rec_len)) {
1678			inode_set_mtime_to_ts(dir,
1679					      inode_set_ctime_current(dir));
1680			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
1681			if (retval < 0) {
1682				mlog_errno(retval);
1683				goto bail;
1684			}
1685
1686			if (insert_bh == parent_fe_bh)
1687				retval = ocfs2_journal_access_di(handle,
1688								 INODE_CACHE(dir),
1689								 insert_bh,
1690								 OCFS2_JOURNAL_ACCESS_WRITE);
1691			else {
1692				retval = ocfs2_journal_access_db(handle,
1693								 INODE_CACHE(dir),
1694								 insert_bh,
1695					      OCFS2_JOURNAL_ACCESS_WRITE);
1696
1697				if (!retval && ocfs2_dir_indexed(dir))
1698					retval = ocfs2_dx_dir_insert(dir,
1699								handle,
1700								lookup);
1701			}
1702
1703			if (retval) {
1704				mlog_errno(retval);
1705				goto bail;
1706			}
1707
1708			/* By now the buffer is marked for journaling */
1709			offset += le16_to_cpu(de->rec_len);
1710			if (le64_to_cpu(de->inode)) {
1711				de1 = (struct ocfs2_dir_entry *)((char *) de +
1712					OCFS2_DIR_REC_LEN(de->name_len));
1713				de1->rec_len =
1714					cpu_to_le16(le16_to_cpu(de->rec_len) -
1715					OCFS2_DIR_REC_LEN(de->name_len));
1716				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
1717				de = de1;
1718			}
1719			de->file_type = FT_UNKNOWN;
1720			if (blkno) {
1721				de->inode = cpu_to_le64(blkno);
1722				ocfs2_set_de_type(de, inode->i_mode);
1723			} else
1724				de->inode = 0;
1725			de->name_len = namelen;
1726			memcpy(de->name, name, namelen);
1727
1728			if (ocfs2_dir_indexed(dir))
1729				ocfs2_recalc_free_list(dir, handle, lookup);
1730
1731			inode_inc_iversion(dir);
1732			ocfs2_journal_dirty(handle, insert_bh);
1733			retval = 0;
1734			goto bail;
1735		}
1736
1737		offset += le16_to_cpu(de->rec_len);
1738		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
1739	}
1740
1741	/* when you think about it, the assert above should prevent us
1742	 * from ever getting here. */
1743	retval = -ENOSPC;
1744bail:
1745	if (retval)
1746		mlog_errno(retval);
1747
1748	return retval;
1749}
1750
1751static int ocfs2_dir_foreach_blk_id(struct inode *inode,
1752				    u64 *f_version,
1753				    struct dir_context *ctx)
1754{
1755	int ret, i;
1756	unsigned long offset = ctx->pos;
1757	struct buffer_head *di_bh = NULL;
1758	struct ocfs2_dinode *di;
1759	struct ocfs2_inline_data *data;
1760	struct ocfs2_dir_entry *de;
1761
1762	ret = ocfs2_read_inode_block(inode, &di_bh);
1763	if (ret) {
1764		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
1765		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1766		goto out;
1767	}
1768
1769	di = (struct ocfs2_dinode *)di_bh->b_data;
1770	data = &di->id2.i_data;
1771
1772	while (ctx->pos < i_size_read(inode)) {
1773		/* If the dir block has changed since the last call to
1774		 * readdir(2), then we might be pointing to an invalid
1775		 * dirent right now.  Scan from the start of the block
1776		 * to make sure. */
1777		if (!inode_eq_iversion(inode, *f_version)) {
1778			for (i = 0; i < i_size_read(inode) && i < offset; ) {
1779				de = (struct ocfs2_dir_entry *)
1780					(data->id_data + i);
1781				/* It's too expensive to do a full
1782				 * dirent test each time round this
1783				 * loop, but we do have to test at
1784				 * least that it is non-zero.  A
1785				 * failure will be detected in the
1786				 * dirent test below. */
1787				if (le16_to_cpu(de->rec_len) <
1788				    OCFS2_DIR_REC_LEN(1))
1789					break;
1790				i += le16_to_cpu(de->rec_len);
1791			}
1792			ctx->pos = offset = i;
1793			*f_version = inode_query_iversion(inode);
1794		}
1795
1796		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
1797		if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data,
1798					   i_size_read(inode), ctx->pos)) {
1799			/* On error, skip the f_pos to the end. */
1800			ctx->pos = i_size_read(inode);
1801			break;
1802		}
1803		offset += le16_to_cpu(de->rec_len);
1804		if (le64_to_cpu(de->inode)) {
 
 
 
 
 
1805			if (!dir_emit(ctx, de->name, de->name_len,
1806				      le64_to_cpu(de->inode),
1807				      fs_ftype_to_dtype(de->file_type)))
1808				goto out;
1809		}
1810		ctx->pos += le16_to_cpu(de->rec_len);
1811	}
1812out:
1813	brelse(di_bh);
1814	return 0;
1815}
1816
1817/*
1818 * NOTE: This function can be called against unindexed directories,
1819 * and indexed ones.
1820 */
1821static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1822				    u64 *f_version,
1823				    struct dir_context *ctx,
1824				    bool persist)
1825{
1826	unsigned long offset, blk, last_ra_blk = 0;
1827	int i;
1828	struct buffer_head * bh, * tmp;
1829	struct ocfs2_dir_entry * de;
1830	struct super_block * sb = inode->i_sb;
1831	unsigned int ra_sectors = 16;
1832	int stored = 0;
1833
1834	bh = NULL;
1835
1836	offset = ctx->pos & (sb->s_blocksize - 1);
1837
1838	while (ctx->pos < i_size_read(inode)) {
1839		blk = ctx->pos >> sb->s_blocksize_bits;
1840		if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
1841			/* Skip the corrupt dirblock and keep trying */
1842			ctx->pos += sb->s_blocksize - offset;
1843			continue;
1844		}
1845
1846		/* The idea here is to begin with 8k read-ahead and to stay
1847		 * 4k ahead of our current position.
1848		 *
1849		 * TODO: Use the pagecache for this. We just need to
1850		 * make sure it's cluster-safe... */
1851		if (!last_ra_blk
1852		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
1853			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
1854			     i > 0; i--) {
1855				tmp = NULL;
1856				if (!ocfs2_read_dir_block(inode, ++blk, &tmp,
1857							  OCFS2_BH_READAHEAD))
1858					brelse(tmp);
1859			}
1860			last_ra_blk = blk;
1861			ra_sectors = 8;
1862		}
1863
1864		/* If the dir block has changed since the last call to
1865		 * readdir(2), then we might be pointing to an invalid
1866		 * dirent right now.  Scan from the start of the block
1867		 * to make sure. */
1868		if (!inode_eq_iversion(inode, *f_version)) {
1869			for (i = 0; i < sb->s_blocksize && i < offset; ) {
1870				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
1871				/* It's too expensive to do a full
1872				 * dirent test each time round this
1873				 * loop, but we do have to test at
1874				 * least that it is non-zero.  A
1875				 * failure will be detected in the
1876				 * dirent test below. */
1877				if (le16_to_cpu(de->rec_len) <
1878				    OCFS2_DIR_REC_LEN(1))
1879					break;
1880				i += le16_to_cpu(de->rec_len);
1881			}
1882			offset = i;
1883			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
1884				| offset;
1885			*f_version = inode_query_iversion(inode);
1886		}
1887
1888		while (ctx->pos < i_size_read(inode)
1889		       && offset < sb->s_blocksize) {
1890			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
1891			if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data,
1892						   sb->s_blocksize, offset)) {
1893				/* On error, skip the f_pos to the
1894				   next block. */
1895				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1896				break;
 
1897			}
1898			if (le64_to_cpu(de->inode)) {
 
 
 
 
1899				if (!dir_emit(ctx, de->name,
1900						de->name_len,
1901						le64_to_cpu(de->inode),
1902					fs_ftype_to_dtype(de->file_type))) {
1903					brelse(bh);
1904					return 0;
1905				}
1906				stored++;
1907			}
1908			offset += le16_to_cpu(de->rec_len);
1909			ctx->pos += le16_to_cpu(de->rec_len);
1910		}
1911		offset = 0;
1912		brelse(bh);
1913		bh = NULL;
1914		if (!persist && stored)
1915			break;
1916	}
1917	return 0;
1918}
1919
1920static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
1921				 struct dir_context *ctx,
1922				 bool persist)
1923{
1924	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1925		return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
1926	return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
1927}
1928
1929/*
1930 * This is intended to be called from inside other kernel functions,
1931 * so we fake some arguments.
1932 */
1933int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
1934{
1935	u64 version = inode_query_iversion(inode);
1936	ocfs2_dir_foreach_blk(inode, &version, ctx, true);
1937	return 0;
1938}
1939
1940/*
1941 * ocfs2_readdir()
1942 *
1943 */
1944int ocfs2_readdir(struct file *file, struct dir_context *ctx)
1945{
1946	int error = 0;
1947	struct inode *inode = file_inode(file);
1948	struct ocfs2_file_private *fp = file->private_data;
1949	int lock_level = 0;
1950
1951	trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
1952
1953	error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
1954	if (lock_level && error >= 0) {
1955		/* We release EX lock which used to update atime
1956		 * and get PR lock again to reduce contention
1957		 * on commonly accessed directories. */
1958		ocfs2_inode_unlock(inode, 1);
1959		lock_level = 0;
1960		error = ocfs2_inode_lock(inode, NULL, 0);
1961	}
1962	if (error < 0) {
1963		if (error != -ENOENT)
1964			mlog_errno(error);
1965		/* we haven't got any yet, so propagate the error. */
1966		goto bail_nolock;
1967	}
1968
1969	error = ocfs2_dir_foreach_blk(inode, &fp->cookie, ctx, false);
1970
1971	ocfs2_inode_unlock(inode, lock_level);
1972	if (error)
1973		mlog_errno(error);
1974
1975bail_nolock:
1976
1977	return error;
1978}
1979
1980/*
1981 * NOTE: this should always be called with parent dir i_rwsem taken.
1982 */
1983int ocfs2_find_files_on_disk(const char *name,
1984			     int namelen,
1985			     u64 *blkno,
1986			     struct inode *inode,
1987			     struct ocfs2_dir_lookup_result *lookup)
1988{
1989	int status = -ENOENT;
1990
1991	trace_ocfs2_find_files_on_disk(namelen, name, blkno,
1992				(unsigned long long)OCFS2_I(inode)->ip_blkno);
1993
1994	status = ocfs2_find_entry(name, namelen, inode, lookup);
1995	if (status)
1996		goto leave;
1997
1998	*blkno = le64_to_cpu(lookup->dl_entry->inode);
1999
2000	status = 0;
2001leave:
2002
2003	return status;
2004}
2005
2006/*
2007 * Convenience function for callers which just want the block number
2008 * mapped to a name and don't require the full dirent info, etc.
2009 */
2010int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
2011			       int namelen, u64 *blkno)
2012{
2013	int ret;
2014	struct ocfs2_dir_lookup_result lookup = { NULL, };
2015
2016	ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup);
2017	ocfs2_free_dir_lookup_result(&lookup);
2018
2019	return ret;
2020}
2021
2022/* Check for a name within a directory.
2023 *
2024 * Return 0 if the name does not exist
2025 * Return -EEXIST if the directory contains the name
2026 * Return -EFSCORRUPTED if found corruption
2027 *
2028 * Callers should have i_rwsem + a cluster lock on dir
2029 */
2030int ocfs2_check_dir_for_entry(struct inode *dir,
2031			      const char *name,
2032			      int namelen)
2033{
2034	int ret = 0;
2035	struct ocfs2_dir_lookup_result lookup = { NULL, };
2036
2037	trace_ocfs2_check_dir_for_entry(
2038		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2039
2040	ret = ocfs2_find_entry(name, namelen, dir, &lookup);
2041	if (ret == 0) {
2042		ret = -EEXIST;
2043		mlog_errno(ret);
2044	} else if (ret == -ENOENT) {
2045		ret = 0;
2046	}
2047
2048	ocfs2_free_dir_lookup_result(&lookup);
2049
2050	return ret;
2051}
2052
2053struct ocfs2_empty_dir_priv {
2054	struct dir_context ctx;
2055	unsigned seen_dot;
2056	unsigned seen_dot_dot;
2057	unsigned seen_other;
2058	unsigned dx_dir;
2059};
2060static bool ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name,
2061				   int name_len, loff_t pos, u64 ino,
2062				   unsigned type)
2063{
2064	struct ocfs2_empty_dir_priv *p =
2065		container_of(ctx, struct ocfs2_empty_dir_priv, ctx);
2066
2067	/*
2068	 * Check the positions of "." and ".." records to be sure
2069	 * they're in the correct place.
2070	 *
2071	 * Indexed directories don't need to proceed past the first
2072	 * two entries, so we end the scan after seeing '..'. Despite
2073	 * that, we allow the scan to proceed In the event that we
2074	 * have a corrupted indexed directory (no dot or dot dot
2075	 * entries). This allows us to double check for existing
2076	 * entries which might not have been found in the index.
2077	 */
2078	if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
2079		p->seen_dot = 1;
2080		return true;
2081	}
2082
2083	if (name_len == 2 && !strncmp("..", name, 2) &&
2084	    pos == OCFS2_DIR_REC_LEN(1)) {
2085		p->seen_dot_dot = 1;
2086
2087		if (p->dx_dir && p->seen_dot)
2088			return false;
2089
2090		return true;
2091	}
2092
2093	p->seen_other = 1;
2094	return false;
2095}
2096
2097static int ocfs2_empty_dir_dx(struct inode *inode,
2098			      struct ocfs2_empty_dir_priv *priv)
2099{
2100	int ret;
2101	struct buffer_head *di_bh = NULL;
2102	struct buffer_head *dx_root_bh = NULL;
2103	struct ocfs2_dinode *di;
2104	struct ocfs2_dx_root_block *dx_root;
2105
2106	priv->dx_dir = 1;
2107
2108	ret = ocfs2_read_inode_block(inode, &di_bh);
2109	if (ret) {
2110		mlog_errno(ret);
2111		goto out;
2112	}
2113	di = (struct ocfs2_dinode *)di_bh->b_data;
2114
2115	ret = ocfs2_read_dx_root(inode, di, &dx_root_bh);
2116	if (ret) {
2117		mlog_errno(ret);
2118		goto out;
2119	}
2120	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2121
2122	if (le32_to_cpu(dx_root->dr_num_entries) != 2)
2123		priv->seen_other = 1;
2124
2125out:
2126	brelse(di_bh);
2127	brelse(dx_root_bh);
2128	return ret;
2129}
2130
2131/*
2132 * routine to check that the specified directory is empty (for rmdir)
2133 *
2134 * Returns 1 if dir is empty, zero otherwise.
2135 *
2136 * XXX: This is a performance problem for unindexed directories.
2137 */
2138int ocfs2_empty_dir(struct inode *inode)
2139{
2140	int ret;
2141	struct ocfs2_empty_dir_priv priv = {
2142		.ctx.actor = ocfs2_empty_dir_filldir,
2143	};
2144
2145	if (ocfs2_dir_indexed(inode)) {
2146		ret = ocfs2_empty_dir_dx(inode, &priv);
2147		if (ret)
2148			mlog_errno(ret);
2149		/*
2150		 * We still run ocfs2_dir_foreach to get the checks
2151		 * for "." and "..".
2152		 */
2153	}
2154
2155	ret = ocfs2_dir_foreach(inode, &priv.ctx);
2156	if (ret)
2157		mlog_errno(ret);
2158
2159	if (!priv.seen_dot || !priv.seen_dot_dot) {
2160		mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
2161		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
2162		/*
2163		 * XXX: Is it really safe to allow an unlink to continue?
2164		 */
2165		return 1;
2166	}
2167
2168	return !priv.seen_other;
2169}
2170
2171/*
2172 * Fills "." and ".." dirents in a new directory block. Returns dirent for
2173 * "..", which might be used during creation of a directory with a trailing
2174 * header. It is otherwise safe to ignore the return code.
2175 */
2176static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
2177							  struct inode *parent,
2178							  char *start,
2179							  unsigned int size)
2180{
2181	struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
2182
2183	de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
2184	de->name_len = 1;
2185	de->rec_len =
2186		cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
2187	strcpy(de->name, ".");
2188	ocfs2_set_de_type(de, S_IFDIR);
2189
2190	de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
2191	de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
2192	de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
2193	de->name_len = 2;
2194	strcpy(de->name, "..");
2195	ocfs2_set_de_type(de, S_IFDIR);
2196
2197	return de;
2198}
2199
2200/*
2201 * This works together with code in ocfs2_mknod_locked() which sets
2202 * the inline-data flag and initializes the inline-data section.
2203 */
2204static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
2205				 handle_t *handle,
2206				 struct inode *parent,
2207				 struct inode *inode,
2208				 struct buffer_head *di_bh)
2209{
2210	int ret;
2211	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2212	struct ocfs2_inline_data *data = &di->id2.i_data;
2213	unsigned int size = le16_to_cpu(data->id_count);
2214
2215	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2216				      OCFS2_JOURNAL_ACCESS_WRITE);
2217	if (ret) {
2218		mlog_errno(ret);
2219		goto out;
2220	}
2221
2222	ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
2223	ocfs2_journal_dirty(handle, di_bh);
2224
2225	i_size_write(inode, size);
2226	set_nlink(inode, 2);
2227	inode->i_blocks = ocfs2_inode_sector_count(inode);
2228
2229	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2230	if (ret < 0)
2231		mlog_errno(ret);
2232
2233out:
2234	return ret;
2235}
2236
2237static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2238				 handle_t *handle,
2239				 struct inode *parent,
2240				 struct inode *inode,
2241				 struct buffer_head *fe_bh,
2242				 struct ocfs2_alloc_context *data_ac,
2243				 struct buffer_head **ret_new_bh)
2244{
2245	int status;
2246	unsigned int size = osb->sb->s_blocksize;
2247	struct buffer_head *new_bh = NULL;
2248	struct ocfs2_dir_entry *de;
2249
2250	if (ocfs2_new_dir_wants_trailer(inode))
2251		size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2252
2253	status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
2254				     data_ac, NULL, &new_bh);
2255	if (status < 0) {
2256		mlog_errno(status);
2257		goto bail;
2258	}
2259
2260	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2261
2262	status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
2263					 OCFS2_JOURNAL_ACCESS_CREATE);
2264	if (status < 0) {
2265		mlog_errno(status);
2266		goto bail;
2267	}
2268	memset(new_bh->b_data, 0, osb->sb->s_blocksize);
2269
2270	de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size);
2271	if (ocfs2_new_dir_wants_trailer(inode)) {
2272		int size = le16_to_cpu(de->rec_len);
2273
2274		/*
2275		 * Figure out the size of the hole left over after
2276		 * insertion of '.' and '..'. The trailer wants this
2277		 * information.
2278		 */
2279		size -= OCFS2_DIR_REC_LEN(2);
2280		size -= sizeof(struct ocfs2_dir_block_trailer);
2281
2282		ocfs2_init_dir_trailer(inode, new_bh, size);
2283	}
2284
2285	ocfs2_journal_dirty(handle, new_bh);
2286
2287	i_size_write(inode, inode->i_sb->s_blocksize);
2288	set_nlink(inode, 2);
2289	inode->i_blocks = ocfs2_inode_sector_count(inode);
2290	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
2291	if (status < 0) {
2292		mlog_errno(status);
2293		goto bail;
2294	}
2295
2296	status = 0;
2297	if (ret_new_bh) {
2298		*ret_new_bh = new_bh;
2299		new_bh = NULL;
2300	}
2301bail:
2302	brelse(new_bh);
2303
2304	return status;
2305}
2306
2307static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2308				     handle_t *handle, struct inode *dir,
2309				     struct buffer_head *di_bh,
2310				     struct buffer_head *dirdata_bh,
2311				     struct ocfs2_alloc_context *meta_ac,
2312				     int dx_inline, u32 num_entries,
2313				     struct buffer_head **ret_dx_root_bh)
2314{
2315	int ret;
2316	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2317	u16 dr_suballoc_bit;
2318	u64 suballoc_loc, dr_blkno;
2319	unsigned int num_bits;
2320	struct buffer_head *dx_root_bh = NULL;
2321	struct ocfs2_dx_root_block *dx_root;
2322	struct ocfs2_dir_block_trailer *trailer =
2323		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
2324
2325	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
2326				   &dr_suballoc_bit, &num_bits, &dr_blkno);
2327	if (ret) {
2328		mlog_errno(ret);
2329		goto out;
2330	}
2331
2332	trace_ocfs2_dx_dir_attach_index(
2333				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2334				(unsigned long long)dr_blkno);
2335
2336	dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2337	if (dx_root_bh == NULL) {
2338		ret = -ENOMEM;
2339		goto out;
2340	}
2341	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
2342
2343	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
2344				      OCFS2_JOURNAL_ACCESS_CREATE);
2345	if (ret < 0) {
2346		mlog_errno(ret);
2347		goto out;
2348	}
2349
2350	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2351	memset(dx_root, 0, osb->sb->s_blocksize);
2352	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
2353	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
2354	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
2355	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
2356	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
2357	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
2358	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
2359	dx_root->dr_num_entries = cpu_to_le32(num_entries);
2360	if (le16_to_cpu(trailer->db_free_rec_len))
2361		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
2362	else
2363		dx_root->dr_free_blk = cpu_to_le64(0);
2364
2365	if (dx_inline) {
2366		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
2367		dx_root->dr_entries.de_count =
2368			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
2369	} else {
2370		dx_root->dr_list.l_count =
2371			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
2372	}
2373	ocfs2_journal_dirty(handle, dx_root_bh);
2374
2375	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2376				      OCFS2_JOURNAL_ACCESS_CREATE);
2377	if (ret) {
2378		mlog_errno(ret);
2379		goto out;
2380	}
2381
2382	di->i_dx_root = cpu_to_le64(dr_blkno);
2383
2384	spin_lock(&OCFS2_I(dir)->ip_lock);
2385	OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2386	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2387	spin_unlock(&OCFS2_I(dir)->ip_lock);
2388
2389	ocfs2_journal_dirty(handle, di_bh);
2390
2391	*ret_dx_root_bh = dx_root_bh;
2392	dx_root_bh = NULL;
2393
2394out:
2395	brelse(dx_root_bh);
2396	return ret;
2397}
2398
2399static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2400				       handle_t *handle, struct inode *dir,
2401				       struct buffer_head **dx_leaves,
2402				       int num_dx_leaves, u64 start_blk)
2403{
2404	int ret, i;
2405	struct ocfs2_dx_leaf *dx_leaf;
2406	struct buffer_head *bh;
2407
2408	for (i = 0; i < num_dx_leaves; i++) {
2409		bh = sb_getblk(osb->sb, start_blk + i);
2410		if (bh == NULL) {
2411			ret = -ENOMEM;
2412			goto out;
2413		}
2414		dx_leaves[i] = bh;
2415
2416		ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
2417
2418		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
2419					      OCFS2_JOURNAL_ACCESS_CREATE);
2420		if (ret < 0) {
2421			mlog_errno(ret);
2422			goto out;
2423		}
2424
2425		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
2426
2427		memset(dx_leaf, 0, osb->sb->s_blocksize);
2428		strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
2429		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
2430		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
2431		dx_leaf->dl_list.de_count =
2432			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2433
2434		trace_ocfs2_dx_dir_format_cluster(
2435				(unsigned long long)OCFS2_I(dir)->ip_blkno,
2436				(unsigned long long)bh->b_blocknr,
2437				le16_to_cpu(dx_leaf->dl_list.de_count));
2438
2439		ocfs2_journal_dirty(handle, bh);
2440	}
2441
2442	ret = 0;
2443out:
2444	return ret;
2445}
2446
2447/*
2448 * Allocates and formats a new cluster for use in an indexed dir
2449 * leaf. This version will not do the extent insert, so that it can be
2450 * used by operations which need careful ordering.
2451 */
2452static int __ocfs2_dx_dir_new_cluster(struct inode *dir,
2453				      u32 cpos, handle_t *handle,
2454				      struct ocfs2_alloc_context *data_ac,
2455				      struct buffer_head **dx_leaves,
2456				      int num_dx_leaves, u64 *ret_phys_blkno)
2457{
2458	int ret;
2459	u32 phys, num;
2460	u64 phys_blkno;
2461	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2462
2463	/*
2464	 * XXX: For create, this should claim cluster for the index
2465	 * *before* the unindexed insert so that we have a better
2466	 * chance of contiguousness as the directory grows in number
2467	 * of entries.
2468	 */
2469	ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num);
2470	if (ret) {
2471		mlog_errno(ret);
2472		goto out;
2473	}
2474
2475	/*
2476	 * Format the new cluster first. That way, we're inserting
2477	 * valid data.
2478	 */
2479	phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys);
2480	ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves,
2481					  num_dx_leaves, phys_blkno);
2482	if (ret) {
2483		mlog_errno(ret);
2484		goto out;
2485	}
2486
2487	*ret_phys_blkno = phys_blkno;
2488out:
2489	return ret;
2490}
2491
2492static int ocfs2_dx_dir_new_cluster(struct inode *dir,
2493				    struct ocfs2_extent_tree *et,
2494				    u32 cpos, handle_t *handle,
2495				    struct ocfs2_alloc_context *data_ac,
2496				    struct ocfs2_alloc_context *meta_ac,
2497				    struct buffer_head **dx_leaves,
2498				    int num_dx_leaves)
2499{
2500	int ret;
2501	u64 phys_blkno;
2502
2503	ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
2504					 num_dx_leaves, &phys_blkno);
2505	if (ret) {
2506		mlog_errno(ret);
2507		goto out;
2508	}
2509
2510	ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
2511				  meta_ac);
2512	if (ret)
2513		mlog_errno(ret);
2514out:
2515	return ret;
2516}
2517
2518static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb,
2519							int *ret_num_leaves)
2520{
2521	int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1);
2522	struct buffer_head **dx_leaves;
2523
2524	dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *),
2525			    GFP_NOFS);
2526	if (dx_leaves && ret_num_leaves)
2527		*ret_num_leaves = num_dx_leaves;
2528
2529	return dx_leaves;
2530}
2531
2532static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
2533				 handle_t *handle,
2534				 struct inode *parent,
2535				 struct inode *inode,
2536				 struct buffer_head *di_bh,
2537				 struct ocfs2_alloc_context *data_ac,
2538				 struct ocfs2_alloc_context *meta_ac)
2539{
2540	int ret;
2541	struct buffer_head *leaf_bh = NULL;
2542	struct buffer_head *dx_root_bh = NULL;
2543	struct ocfs2_dx_hinfo hinfo;
2544	struct ocfs2_dx_root_block *dx_root;
2545	struct ocfs2_dx_entry_list *entry_list;
2546
2547	/*
2548	 * Our strategy is to create the directory as though it were
2549	 * unindexed, then add the index block. This works with very
2550	 * little complication since the state of a new directory is a
2551	 * very well known quantity.
2552	 *
2553	 * Essentially, we have two dirents ("." and ".."), in the 1st
2554	 * block which need indexing. These are easily inserted into
2555	 * the index block.
2556	 */
2557
2558	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh,
2559				    data_ac, &leaf_bh);
2560	if (ret) {
2561		mlog_errno(ret);
2562		goto out;
2563	}
2564
2565	ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh,
2566					meta_ac, 1, 2, &dx_root_bh);
2567	if (ret) {
2568		mlog_errno(ret);
2569		goto out;
2570	}
2571	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2572	entry_list = &dx_root->dr_entries;
2573
2574	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
2575	ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo);
2576	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2577
2578	ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo);
2579	ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr);
2580
2581out:
2582	brelse(dx_root_bh);
2583	brelse(leaf_bh);
2584	return ret;
2585}
2586
2587int ocfs2_fill_new_dir(struct ocfs2_super *osb,
2588		       handle_t *handle,
2589		       struct inode *parent,
2590		       struct inode *inode,
2591		       struct buffer_head *fe_bh,
2592		       struct ocfs2_alloc_context *data_ac,
2593		       struct ocfs2_alloc_context *meta_ac)
2594
2595{
2596	BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
2597
2598	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2599		return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
2600
2601	if (ocfs2_supports_indexed_dirs(osb))
2602		return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh,
2603					     data_ac, meta_ac);
2604
2605	return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
2606				     data_ac, NULL);
2607}
2608
2609static int ocfs2_dx_dir_index_block(struct inode *dir,
2610				    handle_t *handle,
2611				    struct buffer_head **dx_leaves,
2612				    int num_dx_leaves,
2613				    u32 *num_dx_entries,
2614				    struct buffer_head *dirent_bh)
2615{
2616	int ret = 0, namelen, i;
2617	char *de_buf, *limit;
2618	struct ocfs2_dir_entry *de;
2619	struct buffer_head *dx_leaf_bh;
2620	struct ocfs2_dx_hinfo hinfo;
2621	u64 dirent_blk = dirent_bh->b_blocknr;
2622
2623	de_buf = dirent_bh->b_data;
2624	limit = de_buf + dir->i_sb->s_blocksize;
2625
2626	while (de_buf < limit) {
2627		de = (struct ocfs2_dir_entry *)de_buf;
2628
2629		namelen = de->name_len;
2630		if (!namelen || !de->inode)
2631			goto inc;
2632
2633		ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo);
2634
2635		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo);
2636		dx_leaf_bh = dx_leaves[i];
2637
2638		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo,
2639						 dirent_blk, dx_leaf_bh);
2640		if (ret) {
2641			mlog_errno(ret);
2642			goto out;
2643		}
2644
2645		*num_dx_entries = *num_dx_entries + 1;
2646
2647inc:
2648		de_buf += le16_to_cpu(de->rec_len);
2649	}
2650
2651out:
2652	return ret;
2653}
2654
2655/*
2656 * XXX: This expects dx_root_bh to already be part of the transaction.
2657 */
2658static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2659					 struct buffer_head *dx_root_bh,
2660					 struct buffer_head *dirent_bh)
2661{
2662	char *de_buf, *limit;
2663	struct ocfs2_dx_root_block *dx_root;
2664	struct ocfs2_dir_entry *de;
2665	struct ocfs2_dx_hinfo hinfo;
2666	u64 dirent_blk = dirent_bh->b_blocknr;
2667
2668	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
2669
2670	de_buf = dirent_bh->b_data;
2671	limit = de_buf + dir->i_sb->s_blocksize;
2672
2673	while (de_buf < limit) {
2674		de = (struct ocfs2_dir_entry *)de_buf;
2675
2676		if (!de->name_len || !de->inode)
2677			goto inc;
2678
2679		ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2680
2681		trace_ocfs2_dx_dir_index_root_block(
2682				(unsigned long long)dir->i_ino,
2683				hinfo.major_hash, hinfo.minor_hash,
2684				de->name_len, de->name,
2685				le16_to_cpu(dx_root->dr_entries.de_num_used));
2686
2687		ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2688					   dirent_blk);
2689
2690		le32_add_cpu(&dx_root->dr_num_entries, 1);
2691inc:
2692		de_buf += le16_to_cpu(de->rec_len);
2693	}
2694}
2695
2696/*
2697 * Count the number of inline directory entries in di_bh and compare
2698 * them against the number of entries we can hold in an inline dx root
2699 * block.
2700 */
2701static int ocfs2_new_dx_should_be_inline(struct inode *dir,
2702					 struct buffer_head *di_bh)
2703{
2704	int dirent_count = 0;
2705	char *de_buf, *limit;
2706	struct ocfs2_dir_entry *de;
2707	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2708
2709	de_buf = di->id2.i_data.id_data;
2710	limit = de_buf + i_size_read(dir);
2711
2712	while (de_buf < limit) {
2713		de = (struct ocfs2_dir_entry *)de_buf;
2714
2715		if (de->name_len && de->inode)
2716			dirent_count++;
2717
2718		de_buf += le16_to_cpu(de->rec_len);
2719	}
2720
2721	/* We are careful to leave room for one extra record. */
2722	return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb);
2723}
2724
2725/*
2726 * Expand rec_len of the rightmost dirent in a directory block so that it
2727 * contains the end of our valid space for dirents. We do this during
2728 * expansion from an inline directory to one with extents. The first dir block
2729 * in that case is taken from the inline data portion of the inode block.
2730 *
2731 * This will also return the largest amount of contiguous space for a dirent
2732 * in the block. That value is *not* necessarily the last dirent, even after
2733 * expansion. The directory indexing code wants this value for free space
2734 * accounting. We do this here since we're already walking the entire dir
2735 * block.
2736 *
2737 * We add the dir trailer if this filesystem wants it.
2738 */
2739static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size,
2740					     struct inode *dir)
2741{
2742	struct super_block *sb = dir->i_sb;
2743	struct ocfs2_dir_entry *de;
2744	struct ocfs2_dir_entry *prev_de;
2745	char *de_buf, *limit;
2746	unsigned int new_size = sb->s_blocksize;
2747	unsigned int bytes, this_hole;
2748	unsigned int largest_hole = 0;
2749
2750	if (ocfs2_new_dir_wants_trailer(dir))
2751		new_size = ocfs2_dir_trailer_blk_off(sb);
2752
2753	bytes = new_size - old_size;
2754
2755	limit = start + old_size;
2756	de_buf = start;
2757	de = (struct ocfs2_dir_entry *)de_buf;
2758	do {
2759		this_hole = ocfs2_figure_dirent_hole(de);
2760		if (this_hole > largest_hole)
2761			largest_hole = this_hole;
2762
2763		prev_de = de;
2764		de_buf += le16_to_cpu(de->rec_len);
2765		de = (struct ocfs2_dir_entry *)de_buf;
2766	} while (de_buf < limit);
2767
2768	le16_add_cpu(&prev_de->rec_len, bytes);
2769
2770	/* We need to double check this after modification of the final
2771	 * dirent. */
2772	this_hole = ocfs2_figure_dirent_hole(prev_de);
2773	if (this_hole > largest_hole)
2774		largest_hole = this_hole;
2775
2776	if (largest_hole >= OCFS2_DIR_MIN_REC_LEN)
2777		return largest_hole;
2778	return 0;
2779}
2780
2781/*
2782 * We allocate enough clusters to fulfill "blocks_wanted", but set
2783 * i_size to exactly one block. Ocfs2_extend_dir() will handle the
2784 * rest automatically for us.
2785 *
2786 * *first_block_bh is a pointer to the 1st data block allocated to the
2787 *  directory.
2788 */
2789static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2790				   unsigned int blocks_wanted,
2791				   struct ocfs2_dir_lookup_result *lookup,
2792				   struct buffer_head **first_block_bh)
2793{
2794	u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0;
2795	struct super_block *sb = dir->i_sb;
2796	int ret, i, num_dx_leaves = 0, dx_inline = 0,
2797		credits = ocfs2_inline_to_extents_credits(sb);
2798	u64 dx_insert_blkno, blkno,
2799		bytes = blocks_wanted << sb->s_blocksize_bits;
2800	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2801	struct ocfs2_inode_info *oi = OCFS2_I(dir);
2802	struct ocfs2_alloc_context *data_ac = NULL;
2803	struct ocfs2_alloc_context *meta_ac = NULL;
2804	struct buffer_head *dirdata_bh = NULL;
2805	struct buffer_head *dx_root_bh = NULL;
2806	struct buffer_head **dx_leaves = NULL;
2807	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2808	handle_t *handle;
2809	struct ocfs2_extent_tree et;
2810	struct ocfs2_extent_tree dx_et;
2811	int did_quota = 0, bytes_allocated = 0;
2812
2813	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
2814
2815	alloc = ocfs2_clusters_for_bytes(sb, bytes);
2816	dx_alloc = 0;
2817
2818	down_write(&oi->ip_alloc_sem);
2819
2820	if (ocfs2_supports_indexed_dirs(osb)) {
2821		credits += ocfs2_add_dir_index_credits(sb);
2822
2823		dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh);
2824		if (!dx_inline) {
2825			/* Add one more cluster for an index leaf */
2826			dx_alloc++;
2827			dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb,
2828								&num_dx_leaves);
2829			if (!dx_leaves) {
2830				ret = -ENOMEM;
2831				mlog_errno(ret);
2832				goto out;
2833			}
2834		}
2835
2836		/* This gets us the dx_root */
2837		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
2838		if (ret) {
2839			mlog_errno(ret);
2840			goto out;
2841		}
2842	}
2843
2844	/*
2845	 * We should never need more than 2 clusters for the unindexed
2846	 * tree - maximum dirent size is far less than one block. In
2847	 * fact, the only time we'd need more than one cluster is if
2848	 * blocksize == clustersize and the dirent won't fit in the
2849	 * extra space that the expansion to a single block gives. As
2850	 * of today, that only happens on 4k/4k file systems.
2851	 */
2852	BUG_ON(alloc > 2);
2853
2854	ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac);
2855	if (ret) {
2856		mlog_errno(ret);
2857		goto out;
2858	}
2859
2860	/*
2861	 * Prepare for worst case allocation scenario of two separate
2862	 * extents in the unindexed tree.
2863	 */
2864	if (alloc == 2)
2865		credits += OCFS2_SUBALLOC_ALLOC;
2866
2867	handle = ocfs2_start_trans(osb, credits);
2868	if (IS_ERR(handle)) {
2869		ret = PTR_ERR(handle);
2870		mlog_errno(ret);
2871		goto out;
2872	}
2873
2874	ret = dquot_alloc_space_nodirty(dir,
2875		ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
2876	if (ret)
2877		goto out_commit;
2878	did_quota = 1;
2879
2880	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2881		/*
2882		 * Allocate our index cluster first, to maximize the
2883		 * possibility that unindexed leaves grow
2884		 * contiguously.
2885		 */
2886		ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac,
2887						 dx_leaves, num_dx_leaves,
2888						 &dx_insert_blkno);
2889		if (ret) {
2890			mlog_errno(ret);
2891			goto out_commit;
2892		}
2893		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2894	}
2895
2896	/*
2897	 * Try to claim as many clusters as the bitmap can give though
2898	 * if we only get one now, that's enough to continue. The rest
2899	 * will be claimed after the conversion to extents.
2900	 */
2901	if (ocfs2_dir_resv_allowed(osb))
2902		data_ac->ac_resv = &oi->ip_la_data_resv;
2903	ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len);
2904	if (ret) {
2905		mlog_errno(ret);
2906		goto out_commit;
2907	}
2908	bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
2909
2910	/*
2911	 * Operations are carefully ordered so that we set up the new
2912	 * data block first. The conversion from inline data to
2913	 * extents follows.
2914	 */
2915	blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
2916	dirdata_bh = sb_getblk(sb, blkno);
2917	if (!dirdata_bh) {
2918		ret = -ENOMEM;
2919		mlog_errno(ret);
2920		goto out_commit;
2921	}
2922
2923	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
2924
2925	ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
2926				      OCFS2_JOURNAL_ACCESS_CREATE);
2927	if (ret) {
2928		mlog_errno(ret);
2929		goto out_commit;
2930	}
2931
2932	memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
2933	memset(dirdata_bh->b_data + i_size_read(dir), 0,
2934	       sb->s_blocksize - i_size_read(dir));
2935	i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir);
2936	if (ocfs2_new_dir_wants_trailer(dir)) {
2937		/*
2938		 * Prepare the dir trailer up front. It will otherwise look
2939		 * like a valid dirent. Even if inserting the index fails
2940		 * (unlikely), then all we'll have done is given first dir
2941		 * block a small amount of fragmentation.
2942		 */
2943		ocfs2_init_dir_trailer(dir, dirdata_bh, i);
2944	}
2945
2946	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2947	ocfs2_journal_dirty(handle, dirdata_bh);
2948
2949	if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
2950		/*
2951		 * Dx dirs with an external cluster need to do this up
2952		 * front. Inline dx root's get handled later, after
2953		 * we've allocated our root block. We get passed back
2954		 * a total number of items so that dr_num_entries can
2955		 * be correctly set once the dx_root has been
2956		 * allocated.
2957		 */
2958		ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves,
2959					       num_dx_leaves, &num_dx_entries,
2960					       dirdata_bh);
2961		if (ret) {
2962			mlog_errno(ret);
2963			goto out_commit;
2964		}
2965	}
2966
2967	/*
2968	 * Set extent, i_size, etc on the directory. After this, the
2969	 * inode should contain the same exact dirents as before and
2970	 * be fully accessible from system calls.
2971	 *
2972	 * We let the later dirent insert modify c/mtime - to the user
2973	 * the data hasn't changed.
2974	 */
2975	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
2976				      OCFS2_JOURNAL_ACCESS_CREATE);
2977	if (ret) {
2978		mlog_errno(ret);
2979		goto out_commit;
2980	}
2981
2982	spin_lock(&oi->ip_lock);
2983	oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
2984	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2985	spin_unlock(&oi->ip_lock);
2986
2987	ocfs2_dinode_new_extent_list(dir, di);
2988
2989	i_size_write(dir, sb->s_blocksize);
2990	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
2991
2992	di->i_size = cpu_to_le64(sb->s_blocksize);
2993	di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir));
2994	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(dir));
2995	ocfs2_update_inode_fsync_trans(handle, dir, 1);
2996
2997	/*
2998	 * This should never fail as our extent list is empty and all
2999	 * related blocks have been journaled already.
3000	 */
3001	ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
3002				  0, NULL);
3003	if (ret) {
3004		mlog_errno(ret);
3005		goto out_commit;
3006	}
3007
3008	/*
3009	 * Set i_blocks after the extent insert for the most up to
3010	 * date ip_clusters value.
3011	 */
3012	dir->i_blocks = ocfs2_inode_sector_count(dir);
3013
3014	ocfs2_journal_dirty(handle, di_bh);
3015
3016	if (ocfs2_supports_indexed_dirs(osb)) {
3017		ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
3018						dirdata_bh, meta_ac, dx_inline,
3019						num_dx_entries, &dx_root_bh);
3020		if (ret) {
3021			mlog_errno(ret);
3022			goto out_commit;
3023		}
3024
3025		if (dx_inline) {
3026			ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
3027						      dirdata_bh);
3028		} else {
3029			ocfs2_init_dx_root_extent_tree(&dx_et,
3030						       INODE_CACHE(dir),
3031						       dx_root_bh);
3032			ret = ocfs2_insert_extent(handle, &dx_et, 0,
3033						  dx_insert_blkno, 1, 0, NULL);
3034			if (ret)
3035				mlog_errno(ret);
3036		}
3037	}
3038
3039	/*
3040	 * We asked for two clusters, but only got one in the 1st
3041	 * pass. Claim the 2nd cluster as a separate extent.
3042	 */
3043	if (alloc > len) {
3044		ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
3045					   &len);
3046		if (ret) {
3047			mlog_errno(ret);
3048			goto out_commit;
3049		}
3050		blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
3051
3052		ret = ocfs2_insert_extent(handle, &et, 1,
3053					  blkno, len, 0, NULL);
3054		if (ret) {
3055			mlog_errno(ret);
3056			goto out_commit;
3057		}
3058		bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1);
3059	}
3060
3061	*first_block_bh = dirdata_bh;
3062	dirdata_bh = NULL;
3063	if (ocfs2_supports_indexed_dirs(osb)) {
3064		unsigned int off;
3065
3066		if (!dx_inline) {
3067			/*
3068			 * We need to return the correct block within the
3069			 * cluster which should hold our entry.
3070			 */
3071			off = ocfs2_dx_dir_hash_idx(osb,
3072						    &lookup->dl_hinfo);
3073			get_bh(dx_leaves[off]);
3074			lookup->dl_dx_leaf_bh = dx_leaves[off];
3075		}
3076		lookup->dl_dx_root_bh = dx_root_bh;
3077		dx_root_bh = NULL;
3078	}
3079
3080out_commit:
3081	if (ret < 0 && did_quota)
3082		dquot_free_space_nodirty(dir, bytes_allocated);
3083
3084	ocfs2_commit_trans(osb, handle);
3085
3086out:
3087	up_write(&oi->ip_alloc_sem);
3088	if (data_ac)
3089		ocfs2_free_alloc_context(data_ac);
3090	if (meta_ac)
3091		ocfs2_free_alloc_context(meta_ac);
3092
3093	if (dx_leaves) {
3094		for (i = 0; i < num_dx_leaves; i++)
3095			brelse(dx_leaves[i]);
3096		kfree(dx_leaves);
3097	}
3098
3099	brelse(dirdata_bh);
3100	brelse(dx_root_bh);
3101
3102	return ret;
3103}
3104
3105/* returns a bh of the 1st new block in the allocation. */
3106static int ocfs2_do_extend_dir(struct super_block *sb,
3107			       handle_t *handle,
3108			       struct inode *dir,
3109			       struct buffer_head *parent_fe_bh,
3110			       struct ocfs2_alloc_context *data_ac,
3111			       struct ocfs2_alloc_context *meta_ac,
3112			       struct buffer_head **new_bh)
3113{
3114	int status;
3115	int extend, did_quota = 0;
3116	u64 p_blkno, v_blkno;
3117
3118	spin_lock(&OCFS2_I(dir)->ip_lock);
3119	extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
3120	spin_unlock(&OCFS2_I(dir)->ip_lock);
3121
3122	if (extend) {
3123		u32 offset = OCFS2_I(dir)->ip_clusters;
3124
3125		status = dquot_alloc_space_nodirty(dir,
3126					ocfs2_clusters_to_bytes(sb, 1));
3127		if (status)
3128			goto bail;
3129		did_quota = 1;
3130
3131		status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
3132					      1, 0, parent_fe_bh, handle,
3133					      data_ac, meta_ac, NULL);
3134		BUG_ON(status == -EAGAIN);
3135		if (status < 0) {
3136			mlog_errno(status);
3137			goto bail;
3138		}
3139	}
3140
3141	v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
3142	status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
3143	if (status < 0) {
3144		mlog_errno(status);
3145		goto bail;
3146	}
3147
3148	*new_bh = sb_getblk(sb, p_blkno);
3149	if (!*new_bh) {
3150		status = -ENOMEM;
3151		mlog_errno(status);
3152		goto bail;
3153	}
3154	status = 0;
3155bail:
3156	if (did_quota && status < 0)
3157		dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3158	return status;
3159}
3160
3161/*
3162 * Assumes you already have a cluster lock on the directory.
3163 *
3164 * 'blocks_wanted' is only used if we have an inline directory which
3165 * is to be turned into an extent based one. The size of the dirent to
3166 * insert might be larger than the space gained by growing to just one
3167 * block, so we may have to grow the inode by two blocks in that case.
3168 *
3169 * If the directory is already indexed, dx_root_bh must be provided.
3170 */
3171static int ocfs2_extend_dir(struct ocfs2_super *osb,
3172			    struct inode *dir,
3173			    struct buffer_head *parent_fe_bh,
3174			    unsigned int blocks_wanted,
3175			    struct ocfs2_dir_lookup_result *lookup,
3176			    struct buffer_head **new_de_bh)
3177{
3178	int status = 0;
3179	int credits, num_free_extents, drop_alloc_sem = 0;
3180	loff_t dir_i_size;
3181	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
3182	struct ocfs2_extent_list *el = &fe->id2.i_list;
3183	struct ocfs2_alloc_context *data_ac = NULL;
3184	struct ocfs2_alloc_context *meta_ac = NULL;
3185	handle_t *handle = NULL;
3186	struct buffer_head *new_bh = NULL;
3187	struct ocfs2_dir_entry * de;
3188	struct super_block *sb = osb->sb;
3189	struct ocfs2_extent_tree et;
3190	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3191
3192	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3193		/*
3194		 * This would be a code error as an inline directory should
3195		 * never have an index root.
3196		 */
3197		BUG_ON(dx_root_bh);
3198
3199		status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
3200						 blocks_wanted, lookup,
3201						 &new_bh);
3202		if (status) {
3203			mlog_errno(status);
3204			goto bail;
3205		}
3206
3207		/* Expansion from inline to an indexed directory will
3208		 * have given us this. */
3209		dx_root_bh = lookup->dl_dx_root_bh;
3210
3211		if (blocks_wanted == 1) {
3212			/*
3213			 * If the new dirent will fit inside the space
3214			 * created by pushing out to one block, then
3215			 * we can complete the operation
3216			 * here. Otherwise we have to expand i_size
3217			 * and format the 2nd block below.
3218			 */
3219			BUG_ON(new_bh == NULL);
3220			goto bail_bh;
3221		}
3222
3223		/*
3224		 * Get rid of 'new_bh' - we want to format the 2nd
3225		 * data block and return that instead.
3226		 */
3227		brelse(new_bh);
3228		new_bh = NULL;
3229
3230		down_write(&OCFS2_I(dir)->ip_alloc_sem);
3231		drop_alloc_sem = 1;
3232		dir_i_size = i_size_read(dir);
3233		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3234		goto do_extend;
3235	}
3236
3237	down_write(&OCFS2_I(dir)->ip_alloc_sem);
3238	drop_alloc_sem = 1;
3239	dir_i_size = i_size_read(dir);
3240	trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3241			       dir_i_size);
3242
3243	/* dir->i_size is always block aligned. */
3244	spin_lock(&OCFS2_I(dir)->ip_lock);
3245	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
3246		spin_unlock(&OCFS2_I(dir)->ip_lock);
3247		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
3248					      parent_fe_bh);
3249		num_free_extents = ocfs2_num_free_extents(&et);
3250		if (num_free_extents < 0) {
3251			status = num_free_extents;
3252			mlog_errno(status);
3253			goto bail;
3254		}
3255
3256		if (!num_free_extents) {
3257			status = ocfs2_reserve_new_metadata(osb, el, &meta_ac);
3258			if (status < 0) {
3259				if (status != -ENOSPC)
3260					mlog_errno(status);
3261				goto bail;
3262			}
3263		}
3264
3265		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
3266		if (status < 0) {
3267			if (status != -ENOSPC)
3268				mlog_errno(status);
3269			goto bail;
3270		}
3271
3272		if (ocfs2_dir_resv_allowed(osb))
3273			data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
3274
3275		credits = ocfs2_calc_extend_credits(sb, el);
3276	} else {
3277		spin_unlock(&OCFS2_I(dir)->ip_lock);
3278		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
3279	}
3280
3281do_extend:
3282	if (ocfs2_dir_indexed(dir))
3283		credits++; /* For attaching the new dirent block to the
3284			    * dx_root */
3285
3286	handle = ocfs2_start_trans(osb, credits);
3287	if (IS_ERR(handle)) {
3288		status = PTR_ERR(handle);
3289		handle = NULL;
3290		mlog_errno(status);
3291		goto bail;
3292	}
3293
3294	status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
3295				     data_ac, meta_ac, &new_bh);
3296	if (status < 0) {
3297		mlog_errno(status);
3298		goto bail;
3299	}
3300
3301	ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
3302
3303	status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
3304					 OCFS2_JOURNAL_ACCESS_CREATE);
3305	if (status < 0) {
3306		mlog_errno(status);
3307		goto bail;
3308	}
3309	memset(new_bh->b_data, 0, sb->s_blocksize);
3310
3311	de = (struct ocfs2_dir_entry *) new_bh->b_data;
3312	de->inode = 0;
3313	if (ocfs2_supports_dir_trailer(dir)) {
3314		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));
3315
3316		ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len));
3317
3318		if (ocfs2_dir_indexed(dir)) {
3319			status = ocfs2_dx_dir_link_trailer(dir, handle,
3320							   dx_root_bh, new_bh);
3321			if (status) {
3322				mlog_errno(status);
3323				goto bail;
3324			}
3325		}
3326	} else {
3327		de->rec_len = cpu_to_le16(sb->s_blocksize);
3328	}
3329	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3330	ocfs2_journal_dirty(handle, new_bh);
3331
3332	dir_i_size += dir->i_sb->s_blocksize;
3333	i_size_write(dir, dir_i_size);
3334	dir->i_blocks = ocfs2_inode_sector_count(dir);
3335	status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
3336	if (status < 0) {
3337		mlog_errno(status);
3338		goto bail;
3339	}
3340
3341bail_bh:
3342	*new_de_bh = new_bh;
3343	get_bh(*new_de_bh);
3344bail:
3345	if (handle)
3346		ocfs2_commit_trans(osb, handle);
3347	if (drop_alloc_sem)
3348		up_write(&OCFS2_I(dir)->ip_alloc_sem);
3349
3350	if (data_ac)
3351		ocfs2_free_alloc_context(data_ac);
3352	if (meta_ac)
3353		ocfs2_free_alloc_context(meta_ac);
3354
3355	brelse(new_bh);
3356
3357	return status;
3358}
3359
3360static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
3361				   const char *name, int namelen,
3362				   struct buffer_head **ret_de_bh,
3363				   unsigned int *blocks_wanted)
3364{
3365	int ret;
3366	struct super_block *sb = dir->i_sb;
3367	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3368	struct ocfs2_dir_entry *de, *last_de = NULL;
3369	char *first_de, *de_buf, *limit;
3370	unsigned long offset = 0;
3371	unsigned int rec_len, new_rec_len, free_space;
3372
3373	/*
3374	 * This calculates how many free bytes we'd have in block zero, should
3375	 * this function force expansion to an extent tree.
3376	 */
3377	if (ocfs2_new_dir_wants_trailer(dir))
3378		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir);
3379	else
3380		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
3381
3382	first_de = di->id2.i_data.id_data;
3383	de_buf = first_de;
3384	limit = de_buf + i_size_read(dir);
3385	rec_len = OCFS2_DIR_REC_LEN(namelen);
3386
3387	while (de_buf < limit) {
3388		de = (struct ocfs2_dir_entry *)de_buf;
3389
3390		if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de,
3391					   i_size_read(dir), offset)) {
3392			ret = -ENOENT;
3393			goto out;
3394		}
3395		if (ocfs2_match(namelen, name, de)) {
3396			ret = -EEXIST;
3397			goto out;
3398		}
3399		/*
3400		 * No need to check for a trailing dirent record here as
3401		 * they're not used for inline dirs.
3402		 */
3403
3404		if (ocfs2_dirent_would_fit(de, rec_len)) {
3405			/* Ok, we found a spot. Return this bh and let
3406			 * the caller actually fill it in. */
3407			*ret_de_bh = di_bh;
3408			get_bh(*ret_de_bh);
3409			ret = 0;
3410			goto out;
3411		}
3412
3413		last_de = de;
3414		de_buf += le16_to_cpu(de->rec_len);
3415		offset += le16_to_cpu(de->rec_len);
3416	}
3417
3418	/*
3419	 * We're going to require expansion of the directory - figure
3420	 * out how many blocks we'll need so that a place for the
3421	 * dirent can be found.
3422	 */
3423	*blocks_wanted = 1;
3424	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
3425	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
3426		*blocks_wanted = 2;
3427
3428	ret = -ENOSPC;
3429out:
3430	return ret;
3431}
3432
3433static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
3434				   int namelen, struct buffer_head **ret_de_bh)
3435{
3436	unsigned long offset;
3437	struct buffer_head *bh = NULL;
3438	unsigned short rec_len;
3439	struct ocfs2_dir_entry *de;
3440	struct super_block *sb = dir->i_sb;
3441	int status;
3442	int blocksize = dir->i_sb->s_blocksize;
3443
3444	status = ocfs2_read_dir_block(dir, 0, &bh, 0);
3445	if (status)
3446		goto bail;
3447
3448	rec_len = OCFS2_DIR_REC_LEN(namelen);
3449	offset = 0;
3450	de = (struct ocfs2_dir_entry *) bh->b_data;
3451	while (1) {
3452		if ((char *)de >= sb->s_blocksize + bh->b_data) {
3453			brelse(bh);
3454			bh = NULL;
3455
3456			if (i_size_read(dir) <= offset) {
3457				/*
3458				 * Caller will have to expand this
3459				 * directory.
3460				 */
3461				status = -ENOSPC;
3462				goto bail;
3463			}
3464			status = ocfs2_read_dir_block(dir,
3465					     offset >> sb->s_blocksize_bits,
3466					     &bh, 0);
3467			if (status)
3468				goto bail;
3469
3470			/* move to next block */
3471			de = (struct ocfs2_dir_entry *) bh->b_data;
3472		}
3473		if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize,
3474					   offset)) {
3475			status = -ENOENT;
3476			goto bail;
3477		}
3478		if (ocfs2_match(namelen, name, de)) {
3479			status = -EEXIST;
3480			goto bail;
3481		}
3482
3483		if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize,
3484					   blocksize))
3485			goto next;
3486
3487		if (ocfs2_dirent_would_fit(de, rec_len)) {
3488			/* Ok, we found a spot. Return this bh and let
3489			 * the caller actually fill it in. */
3490			*ret_de_bh = bh;
3491			get_bh(*ret_de_bh);
3492			status = 0;
3493			goto bail;
3494		}
3495next:
3496		offset += le16_to_cpu(de->rec_len);
3497		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
3498	}
3499
3500bail:
3501	brelse(bh);
3502	if (status)
3503		mlog_errno(status);
3504
3505	return status;
3506}
3507
3508static int dx_leaf_sort_cmp(const void *a, const void *b)
3509{
3510	const struct ocfs2_dx_entry *entry1 = a;
3511	const struct ocfs2_dx_entry *entry2 = b;
3512	u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash);
3513	u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash);
3514	u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash);
3515	u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash);
3516
3517	if (major_hash1 > major_hash2)
3518		return 1;
3519	if (major_hash1 < major_hash2)
3520		return -1;
3521
3522	/*
3523	 * It is not strictly necessary to sort by minor
3524	 */
3525	if (minor_hash1 > minor_hash2)
3526		return 1;
3527	if (minor_hash1 < minor_hash2)
3528		return -1;
3529	return 0;
3530}
3531
 
 
 
 
 
 
 
 
 
 
3532static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
3533{
3534	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3535	int i, num = le16_to_cpu(dl_list->de_num_used);
3536
3537	for (i = 0; i < (num - 1); i++) {
3538		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) !=
3539		    le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash))
3540			return 0;
3541	}
3542
3543	return 1;
3544}
3545
3546/*
3547 * Find the optimal value to split this leaf on. This expects the leaf
3548 * entries to be in sorted order.
3549 *
3550 * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is
3551 * the hash we want to insert.
3552 *
3553 * This function is only concerned with the major hash - that which
3554 * determines which cluster an item belongs to.
3555 */
3556static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
3557					u32 leaf_cpos, u32 insert_hash,
3558					u32 *split_hash)
3559{
3560	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
3561	int i, num_used = le16_to_cpu(dl_list->de_num_used);
3562	int allsame;
3563
3564	/*
3565	 * There's a couple rare, but nasty corner cases we have to
3566	 * check for here. All of them involve a leaf where all value
3567	 * have the same hash, which is what we look for first.
3568	 *
3569	 * Most of the time, all of the above is false, and we simply
3570	 * pick the median value for a split.
3571	 */
3572	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
3573	if (allsame) {
3574		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);
3575
3576		if (val == insert_hash) {
3577			/*
3578			 * No matter where we would choose to split,
3579			 * the new entry would want to occupy the same
3580			 * block as these. Since there's no space left
3581			 * in their existing block, we know there
3582			 * won't be space after the split.
3583			 */
3584			return -ENOSPC;
3585		}
3586
3587		if (val == leaf_cpos) {
3588			/*
3589			 * Because val is the same as leaf_cpos (which
3590			 * is the smallest value this leaf can have),
3591			 * yet is not equal to insert_hash, then we
3592			 * know that insert_hash *must* be larger than
3593			 * val (and leaf_cpos). At least cpos+1 in value.
3594			 *
3595			 * We also know then, that there cannot be an
3596			 * adjacent extent (otherwise we'd be looking
3597			 * at it). Choosing this value gives us a
3598			 * chance to get some contiguousness.
3599			 */
3600			*split_hash = leaf_cpos + 1;
3601			return 0;
3602		}
3603
3604		if (val > insert_hash) {
3605			/*
3606			 * val can not be the same as insert hash, and
3607			 * also must be larger than leaf_cpos. Also,
3608			 * we know that there can't be a leaf between
3609			 * cpos and val, otherwise the entries with
3610			 * hash 'val' would be there.
3611			 */
3612			*split_hash = val;
3613			return 0;
3614		}
3615
3616		*split_hash = insert_hash;
3617		return 0;
3618	}
3619
3620	/*
3621	 * Since the records are sorted and the checks above
3622	 * guaranteed that not all records in this block are the same,
3623	 * we simple travel forward, from the median, and pick the 1st
3624	 * record whose value is larger than leaf_cpos.
3625	 */
3626	for (i = (num_used / 2); i < num_used; i++)
3627		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
3628		    leaf_cpos)
3629			break;
3630
3631	BUG_ON(i == num_used); /* Should be impossible */
3632	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
3633	return 0;
3634}
3635
3636/*
3637 * Transfer all entries in orig_dx_leaves whose major hash is equal to or
3638 * larger than split_hash into new_dx_leaves. We use a temporary
3639 * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks.
3640 *
3641 * Since the block offset inside a leaf (cluster) is a constant mask
3642 * of minor_hash, we can optimize - an item at block offset X within
3643 * the original cluster, will be at offset X within the new cluster.
3644 */
3645static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
3646				       handle_t *handle,
3647				       struct ocfs2_dx_leaf *tmp_dx_leaf,
3648				       struct buffer_head **orig_dx_leaves,
3649				       struct buffer_head **new_dx_leaves,
3650				       int num_dx_leaves)
3651{
3652	int i, j, num_used;
3653	u32 major_hash;
3654	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
3655	struct ocfs2_dx_entry_list *orig_list, *tmp_list;
3656	struct ocfs2_dx_entry *dx_entry;
3657
3658	tmp_list = &tmp_dx_leaf->dl_list;
3659
3660	for (i = 0; i < num_dx_leaves; i++) {
3661		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
3662		orig_list = &orig_dx_leaf->dl_list;
3663		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
 
3664
3665		num_used = le16_to_cpu(orig_list->de_num_used);
3666
3667		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
3668		tmp_list->de_num_used = cpu_to_le16(0);
3669		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);
3670
3671		for (j = 0; j < num_used; j++) {
3672			dx_entry = &orig_list->de_entries[j];
3673			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
3674			if (major_hash >= split_hash)
3675				ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf,
3676							      dx_entry);
3677			else
3678				ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf,
3679							      dx_entry);
3680		}
3681		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);
3682
3683		ocfs2_journal_dirty(handle, orig_dx_leaves[i]);
3684		ocfs2_journal_dirty(handle, new_dx_leaves[i]);
3685	}
3686}
3687
3688static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb,
3689					  struct ocfs2_dx_root_block *dx_root)
3690{
3691	int credits = ocfs2_clusters_to_blocks(osb->sb, 3);
3692
3693	credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list);
3694	credits += ocfs2_quota_trans_credits(osb->sb);
3695	return credits;
3696}
3697
3698/*
3699 * Find the median value in dx_leaf_bh and allocate a new leaf to move
3700 * half our entries into.
3701 */
3702static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3703				  struct buffer_head *dx_root_bh,
3704				  struct buffer_head *dx_leaf_bh,
3705				  struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos,
3706				  u64 leaf_blkno)
3707{
3708	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3709	int credits, ret, i, num_used, did_quota = 0;
3710	u32 cpos, split_hash, insert_hash = hinfo->major_hash;
3711	u64 orig_leaves_start;
3712	int num_dx_leaves;
3713	struct buffer_head **orig_dx_leaves = NULL;
3714	struct buffer_head **new_dx_leaves = NULL;
3715	struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
3716	struct ocfs2_extent_tree et;
3717	handle_t *handle = NULL;
3718	struct ocfs2_dx_root_block *dx_root;
3719	struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3720
3721	trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3722				     (unsigned long long)leaf_blkno,
3723				     insert_hash);
3724
3725	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3726
3727	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3728	/*
3729	 * XXX: This is a rather large limit. We should use a more
3730	 * realistic value.
3731	 */
3732	if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX)
3733		return -ENOSPC;
3734
3735	num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used);
3736	if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) {
3737		mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: "
3738		     "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno,
3739		     (unsigned long long)leaf_blkno, num_used);
3740		ret = -EIO;
3741		goto out;
3742	}
3743
3744	orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
3745	if (!orig_dx_leaves) {
3746		ret = -ENOMEM;
3747		mlog_errno(ret);
3748		goto out;
3749	}
3750
3751	new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL);
3752	if (!new_dx_leaves) {
3753		ret = -ENOMEM;
3754		mlog_errno(ret);
3755		goto out;
3756	}
3757
3758	ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac);
3759	if (ret) {
3760		if (ret != -ENOSPC)
3761			mlog_errno(ret);
3762		goto out;
3763	}
3764
3765	credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root);
3766	handle = ocfs2_start_trans(osb, credits);
3767	if (IS_ERR(handle)) {
3768		ret = PTR_ERR(handle);
3769		handle = NULL;
3770		mlog_errno(ret);
3771		goto out;
3772	}
3773
3774	ret = dquot_alloc_space_nodirty(dir,
3775				       ocfs2_clusters_to_bytes(dir->i_sb, 1));
3776	if (ret)
3777		goto out_commit;
3778	did_quota = 1;
3779
3780	ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
3781				      OCFS2_JOURNAL_ACCESS_WRITE);
3782	if (ret) {
3783		mlog_errno(ret);
3784		goto out_commit;
3785	}
3786
3787	/*
3788	 * This block is changing anyway, so we can sort it in place.
3789	 */
3790	sort(dx_leaf->dl_list.de_entries, num_used,
3791	     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
3792	     NULL);
3793
3794	ocfs2_journal_dirty(handle, dx_leaf_bh);
3795
3796	ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
3797					   &split_hash);
3798	if (ret) {
3799		mlog_errno(ret);
3800		goto  out_commit;
3801	}
3802
3803	trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3804
3805	/*
3806	 * We have to carefully order operations here. There are items
3807	 * which want to be in the new cluster before insert, but in
3808	 * order to put those items in the new cluster, we alter the
3809	 * old cluster. A failure to insert gets nasty.
3810	 *
3811	 * So, start by reserving writes to the old
3812	 * cluster. ocfs2_dx_dir_new_cluster will reserve writes on
3813	 * the new cluster for us, before inserting it. The insert
3814	 * won't happen if there's an error before that. Once the
3815	 * insert is done then, we can transfer from one leaf into the
3816	 * other without fear of hitting any error.
3817	 */
3818
3819	/*
3820	 * The leaf transfer wants some scratch space so that we don't
3821	 * wind up doing a bunch of expensive memmove().
3822	 */
3823	tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS);
3824	if (!tmp_dx_leaf) {
3825		ret = -ENOMEM;
3826		mlog_errno(ret);
3827		goto out_commit;
3828	}
3829
3830	orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno);
3831	ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves,
3832				   orig_dx_leaves);
3833	if (ret) {
3834		mlog_errno(ret);
3835		goto out_commit;
3836	}
3837
3838	cpos = split_hash;
3839	ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3840				       data_ac, meta_ac, new_dx_leaves,
3841				       num_dx_leaves);
3842	if (ret) {
3843		mlog_errno(ret);
3844		goto out_commit;
3845	}
3846
3847	for (i = 0; i < num_dx_leaves; i++) {
3848		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3849					      orig_dx_leaves[i],
3850					      OCFS2_JOURNAL_ACCESS_WRITE);
3851		if (ret) {
3852			mlog_errno(ret);
3853			goto out_commit;
3854		}
3855
3856		ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3857					      new_dx_leaves[i],
3858					      OCFS2_JOURNAL_ACCESS_WRITE);
3859		if (ret) {
3860			mlog_errno(ret);
3861			goto out_commit;
3862		}
3863	}
3864
3865	ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
3866				   orig_dx_leaves, new_dx_leaves, num_dx_leaves);
3867
3868out_commit:
3869	if (ret < 0 && did_quota)
3870		dquot_free_space_nodirty(dir,
3871				ocfs2_clusters_to_bytes(dir->i_sb, 1));
3872
3873	ocfs2_update_inode_fsync_trans(handle, dir, 1);
3874	ocfs2_commit_trans(osb, handle);
3875
3876out:
3877	if (orig_dx_leaves || new_dx_leaves) {
3878		for (i = 0; i < num_dx_leaves; i++) {
3879			if (orig_dx_leaves)
3880				brelse(orig_dx_leaves[i]);
3881			if (new_dx_leaves)
3882				brelse(new_dx_leaves[i]);
3883		}
3884		kfree(orig_dx_leaves);
3885		kfree(new_dx_leaves);
3886	}
3887
3888	if (meta_ac)
3889		ocfs2_free_alloc_context(meta_ac);
3890	if (data_ac)
3891		ocfs2_free_alloc_context(data_ac);
3892
3893	kfree(tmp_dx_leaf);
3894	return ret;
3895}
3896
3897static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
3898				   struct buffer_head *di_bh,
3899				   struct buffer_head *dx_root_bh,
3900				   const char *name, int namelen,
3901				   struct ocfs2_dir_lookup_result *lookup)
3902{
3903	int ret, rebalanced = 0;
3904	struct ocfs2_dx_root_block *dx_root;
3905	struct buffer_head *dx_leaf_bh = NULL;
3906	struct ocfs2_dx_leaf *dx_leaf;
3907	u64 blkno;
3908	u32 leaf_cpos;
3909
3910	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3911
3912restart_search:
3913	ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo,
3914				  &leaf_cpos, &blkno);
3915	if (ret) {
3916		mlog_errno(ret);
3917		goto out;
3918	}
3919
3920	ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh);
3921	if (ret) {
3922		mlog_errno(ret);
3923		goto out;
3924	}
3925
3926	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;
3927
3928	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
3929	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
3930		if (rebalanced) {
3931			/*
3932			 * Rebalancing should have provided us with
3933			 * space in an appropriate leaf.
3934			 *
3935			 * XXX: Is this an abnormal condition then?
3936			 * Should we print a message here?
3937			 */
3938			ret = -ENOSPC;
3939			goto out;
3940		}
3941
3942		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
3943					     &lookup->dl_hinfo, leaf_cpos,
3944					     blkno);
3945		if (ret) {
3946			if (ret != -ENOSPC)
3947				mlog_errno(ret);
3948			goto out;
3949		}
3950
3951		/*
3952		 * Restart the lookup. The rebalance might have
3953		 * changed which block our item fits into. Mark our
3954		 * progress, so we only execute this once.
3955		 */
3956		brelse(dx_leaf_bh);
3957		dx_leaf_bh = NULL;
3958		rebalanced = 1;
3959		goto restart_search;
3960	}
3961
3962	lookup->dl_dx_leaf_bh = dx_leaf_bh;
3963	dx_leaf_bh = NULL;
3964
3965out:
3966	brelse(dx_leaf_bh);
3967	return ret;
3968}
3969
3970static int ocfs2_search_dx_free_list(struct inode *dir,
3971				     struct buffer_head *dx_root_bh,
3972				     int namelen,
3973				     struct ocfs2_dir_lookup_result *lookup)
3974{
3975	int ret = -ENOSPC;
3976	struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL;
3977	struct ocfs2_dir_block_trailer *db;
3978	u64 next_block;
3979	int rec_len = OCFS2_DIR_REC_LEN(namelen);
3980	struct ocfs2_dx_root_block *dx_root;
3981
3982	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
3983	next_block = le64_to_cpu(dx_root->dr_free_blk);
3984
3985	while (next_block) {
3986		brelse(prev_leaf_bh);
3987		prev_leaf_bh = leaf_bh;
3988		leaf_bh = NULL;
3989
3990		ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh);
3991		if (ret) {
3992			mlog_errno(ret);
3993			goto out;
3994		}
3995
3996		db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
3997		if (rec_len <= le16_to_cpu(db->db_free_rec_len)) {
3998			lookup->dl_leaf_bh = leaf_bh;
3999			lookup->dl_prev_leaf_bh = prev_leaf_bh;
4000			leaf_bh = NULL;
4001			prev_leaf_bh = NULL;
4002			break;
4003		}
4004
4005		next_block = le64_to_cpu(db->db_free_next);
4006	}
4007
4008	if (!next_block)
4009		ret = -ENOSPC;
4010
4011out:
4012
4013	brelse(leaf_bh);
4014	brelse(prev_leaf_bh);
4015	return ret;
4016}
4017
4018static int ocfs2_expand_inline_dx_root(struct inode *dir,
4019				       struct buffer_head *dx_root_bh)
4020{
4021	int ret, num_dx_leaves, i, j, did_quota = 0;
4022	struct buffer_head **dx_leaves = NULL;
4023	struct ocfs2_extent_tree et;
4024	u64 insert_blkno;
4025	struct ocfs2_alloc_context *data_ac = NULL;
4026	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4027	handle_t *handle = NULL;
4028	struct ocfs2_dx_root_block *dx_root;
4029	struct ocfs2_dx_entry_list *entry_list;
4030	struct ocfs2_dx_entry *dx_entry;
4031	struct ocfs2_dx_leaf *target_leaf;
4032
4033	ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4034	if (ret) {
4035		mlog_errno(ret);
4036		goto out;
4037	}
4038
4039	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves);
4040	if (!dx_leaves) {
4041		ret = -ENOMEM;
4042		mlog_errno(ret);
4043		goto out;
4044	}
4045
4046	handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb));
4047	if (IS_ERR(handle)) {
4048		ret = PTR_ERR(handle);
4049		mlog_errno(ret);
4050		goto out;
4051	}
4052
4053	ret = dquot_alloc_space_nodirty(dir,
4054				       ocfs2_clusters_to_bytes(osb->sb, 1));
4055	if (ret)
4056		goto out_commit;
4057	did_quota = 1;
4058
4059	/*
4060	 * We do this up front, before the allocation, so that a
4061	 * failure to add the dx_root_bh to the journal won't result
4062	 * us losing clusters.
4063	 */
4064	ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
4065				      OCFS2_JOURNAL_ACCESS_WRITE);
4066	if (ret) {
4067		mlog_errno(ret);
4068		goto out_commit;
4069	}
4070
4071	ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves,
4072					 num_dx_leaves, &insert_blkno);
4073	if (ret) {
4074		mlog_errno(ret);
4075		goto out_commit;
4076	}
4077
4078	/*
4079	 * Transfer the entries from our dx_root into the appropriate
4080	 * block
4081	 */
4082	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4083	entry_list = &dx_root->dr_entries;
4084
4085	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
4086		dx_entry = &entry_list->de_entries[i];
4087
4088		j = __ocfs2_dx_dir_hash_idx(osb,
4089					    le32_to_cpu(dx_entry->dx_minor_hash));
4090		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;
4091
4092		ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry);
4093
4094		/* Each leaf has been passed to the journal already
4095		 * via __ocfs2_dx_dir_new_cluster() */
4096	}
4097
4098	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
4099	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
4100	       offsetof(struct ocfs2_dx_root_block, dr_list));
4101	dx_root->dr_list.l_count =
4102		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
4103
4104	/* This should never fail considering we start with an empty
4105	 * dx_root. */
4106	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4107	ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
4108	if (ret)
4109		mlog_errno(ret);
4110	did_quota = 0;
4111
4112	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4113	ocfs2_journal_dirty(handle, dx_root_bh);
4114
4115out_commit:
4116	if (ret < 0 && did_quota)
4117		dquot_free_space_nodirty(dir,
4118					  ocfs2_clusters_to_bytes(dir->i_sb, 1));
4119
4120	ocfs2_commit_trans(osb, handle);
4121
4122out:
4123	if (data_ac)
4124		ocfs2_free_alloc_context(data_ac);
4125
4126	if (dx_leaves) {
4127		for (i = 0; i < num_dx_leaves; i++)
4128			brelse(dx_leaves[i]);
4129		kfree(dx_leaves);
4130	}
4131	return ret;
4132}
4133
4134static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh)
4135{
4136	struct ocfs2_dx_root_block *dx_root;
4137	struct ocfs2_dx_entry_list *entry_list;
4138
4139	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4140	entry_list = &dx_root->dr_entries;
4141
4142	if (le16_to_cpu(entry_list->de_num_used) >=
4143	    le16_to_cpu(entry_list->de_count))
4144		return -ENOSPC;
4145
4146	return 0;
4147}
4148
4149static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
4150					   struct buffer_head *di_bh,
4151					   const char *name,
4152					   int namelen,
4153					   struct ocfs2_dir_lookup_result *lookup)
4154{
4155	int ret, free_dx_root = 1;
4156	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4157	struct buffer_head *dx_root_bh = NULL;
4158	struct buffer_head *leaf_bh = NULL;
4159	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4160	struct ocfs2_dx_root_block *dx_root;
4161
4162	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4163	if (ret) {
4164		mlog_errno(ret);
4165		goto out;
4166	}
4167
4168	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4169	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
4170		ret = -ENOSPC;
4171		mlog_errno(ret);
4172		goto out;
4173	}
4174
4175	if (ocfs2_dx_root_inline(dx_root)) {
4176		ret = ocfs2_inline_dx_has_space(dx_root_bh);
4177
4178		if (ret == 0)
4179			goto search_el;
4180
4181		/*
4182		 * We ran out of room in the root block. Expand it to
4183		 * an extent, then allow ocfs2_find_dir_space_dx to do
4184		 * the rest.
4185		 */
4186		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
4187		if (ret) {
4188			mlog_errno(ret);
4189			goto out;
4190		}
4191	}
4192
4193	/*
4194	 * Insert preparation for an indexed directory is split into two
4195	 * steps. The call to find_dir_space_dx reserves room in the index for
4196	 * an additional item. If we run out of space there, it's a real error
4197	 * we can't continue on.
4198	 */
4199	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
4200				      namelen, lookup);
4201	if (ret) {
4202		mlog_errno(ret);
4203		goto out;
4204	}
4205
4206search_el:
4207	/*
4208	 * Next, we need to find space in the unindexed tree. This call
4209	 * searches using the free space linked list. If the unindexed tree
4210	 * lacks sufficient space, we'll expand it below. The expansion code
4211	 * is smart enough to add any new blocks to the free space list.
4212	 */
4213	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
4214	if (ret && ret != -ENOSPC) {
4215		mlog_errno(ret);
4216		goto out;
4217	}
4218
4219	/* Do this up here - ocfs2_extend_dir might need the dx_root */
4220	lookup->dl_dx_root_bh = dx_root_bh;
4221	free_dx_root = 0;
4222
4223	if (ret == -ENOSPC) {
4224		ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh);
4225
4226		if (ret) {
4227			mlog_errno(ret);
4228			goto out;
4229		}
4230
4231		/*
4232		 * We make the assumption here that new leaf blocks are added
4233		 * to the front of our free list.
4234		 */
4235		lookup->dl_prev_leaf_bh = NULL;
4236		lookup->dl_leaf_bh = leaf_bh;
4237	}
4238
4239out:
4240	if (free_dx_root)
4241		brelse(dx_root_bh);
4242	return ret;
4243}
4244
4245/*
4246 * Get a directory ready for insert. Any directory allocation required
4247 * happens here. Success returns zero, and enough context in the dir
4248 * lookup result that ocfs2_add_entry() will be able complete the task
4249 * with minimal performance impact.
4250 */
4251int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4252				 struct inode *dir,
4253				 struct buffer_head *parent_fe_bh,
4254				 const char *name,
4255				 int namelen,
4256				 struct ocfs2_dir_lookup_result *lookup)
4257{
4258	int ret;
4259	unsigned int blocks_wanted = 1;
4260	struct buffer_head *bh = NULL;
4261
4262	trace_ocfs2_prepare_dir_for_insert(
4263		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4264
 
 
 
 
 
 
4265	/*
4266	 * Do this up front to reduce confusion.
4267	 *
4268	 * The directory might start inline, then be turned into an
4269	 * indexed one, in which case we'd need to hash deep inside
4270	 * ocfs2_find_dir_space_id(). Since
4271	 * ocfs2_prepare_dx_dir_for_insert() also needs this hash
4272	 * done, there seems no point in spreading out the calls. We
4273	 * can optimize away the case where the file system doesn't
4274	 * support indexing.
4275	 */
4276	if (ocfs2_supports_indexed_dirs(osb))
4277		ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo);
4278
4279	if (ocfs2_dir_indexed(dir)) {
4280		ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh,
4281						      name, namelen, lookup);
4282		if (ret)
4283			mlog_errno(ret);
4284		goto out;
4285	}
4286
4287	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4288		ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
4289					      namelen, &bh, &blocks_wanted);
4290	} else
4291		ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
4292
4293	if (ret && ret != -ENOSPC) {
4294		mlog_errno(ret);
4295		goto out;
4296	}
4297
4298	if (ret == -ENOSPC) {
4299		/*
4300		 * We have to expand the directory to add this name.
4301		 */
4302		BUG_ON(bh);
4303
4304		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
4305				       lookup, &bh);
4306		if (ret) {
4307			if (ret != -ENOSPC)
4308				mlog_errno(ret);
4309			goto out;
4310		}
4311
4312		BUG_ON(!bh);
4313	}
4314
4315	lookup->dl_leaf_bh = bh;
4316	bh = NULL;
4317out:
4318	brelse(bh);
4319	return ret;
4320}
4321
4322static int ocfs2_dx_dir_remove_index(struct inode *dir,
4323				     struct buffer_head *di_bh,
4324				     struct buffer_head *dx_root_bh)
4325{
4326	int ret;
4327	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4328	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4329	struct ocfs2_dx_root_block *dx_root;
4330	struct inode *dx_alloc_inode = NULL;
4331	struct buffer_head *dx_alloc_bh = NULL;
4332	handle_t *handle;
4333	u64 blk;
4334	u16 bit;
4335	u64 bg_blkno;
4336
4337	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
4338
4339	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
4340					EXTENT_ALLOC_SYSTEM_INODE,
4341					le16_to_cpu(dx_root->dr_suballoc_slot));
4342	if (!dx_alloc_inode) {
4343		ret = -ENOMEM;
4344		mlog_errno(ret);
4345		goto out;
4346	}
4347	inode_lock(dx_alloc_inode);
4348
4349	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
4350	if (ret) {
4351		mlog_errno(ret);
4352		goto out_mutex;
4353	}
4354
4355	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
4356	if (IS_ERR(handle)) {
4357		ret = PTR_ERR(handle);
4358		mlog_errno(ret);
4359		goto out_unlock;
4360	}
4361
4362	ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
4363				      OCFS2_JOURNAL_ACCESS_WRITE);
4364	if (ret) {
4365		mlog_errno(ret);
4366		goto out_commit;
4367	}
4368
4369	spin_lock(&OCFS2_I(dir)->ip_lock);
4370	OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4371	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4372	spin_unlock(&OCFS2_I(dir)->ip_lock);
4373	di->i_dx_root = cpu_to_le64(0ULL);
4374	ocfs2_update_inode_fsync_trans(handle, dir, 1);
4375
4376	ocfs2_journal_dirty(handle, di_bh);
4377
4378	blk = le64_to_cpu(dx_root->dr_blkno);
4379	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
4380	if (dx_root->dr_suballoc_loc)
4381		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
4382	else
4383		bg_blkno = ocfs2_which_suballoc_group(blk, bit);
4384	ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh,
4385				       bit, bg_blkno, 1);
4386	if (ret)
4387		mlog_errno(ret);
4388
4389out_commit:
4390	ocfs2_commit_trans(osb, handle);
4391
4392out_unlock:
4393	ocfs2_inode_unlock(dx_alloc_inode, 1);
4394
4395out_mutex:
4396	inode_unlock(dx_alloc_inode);
4397	brelse(dx_alloc_bh);
4398out:
4399	iput(dx_alloc_inode);
4400	return ret;
4401}
4402
4403int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
4404{
4405	int ret;
4406	unsigned int clen;
4407	u32 major_hash = UINT_MAX, p_cpos, cpos;
4408	u64 blkno;
4409	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
4410	struct buffer_head *dx_root_bh = NULL;
4411	struct ocfs2_dx_root_block *dx_root;
4412	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4413	struct ocfs2_cached_dealloc_ctxt dealloc;
4414	struct ocfs2_extent_tree et;
4415
4416	ocfs2_init_dealloc_ctxt(&dealloc);
4417
4418	if (!ocfs2_dir_indexed(dir))
4419		return 0;
4420
4421	ret = ocfs2_read_dx_root(dir, di, &dx_root_bh);
4422	if (ret) {
4423		mlog_errno(ret);
4424		goto out;
4425	}
4426	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
4427
4428	if (ocfs2_dx_root_inline(dx_root))
4429		goto remove_index;
4430
4431	ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
4432
4433	/* XXX: What if dr_clusters is too large? */
4434	while (le32_to_cpu(dx_root->dr_clusters)) {
4435		ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list,
4436					      major_hash, &cpos, &blkno, &clen);
4437		if (ret) {
4438			mlog_errno(ret);
4439			goto out;
4440		}
4441
4442		p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
4443
4444		ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
4445					       &dealloc, 0, false);
4446		if (ret) {
4447			mlog_errno(ret);
4448			goto out;
4449		}
4450
4451		if (cpos == 0)
4452			break;
4453
4454		major_hash = cpos - 1;
4455	}
4456
4457remove_index:
4458	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
4459	if (ret) {
4460		mlog_errno(ret);
4461		goto out;
4462	}
4463
4464	ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
4465out:
4466	ocfs2_schedule_truncate_log_flush(osb, 1);
4467	ocfs2_run_deallocs(osb, &dealloc);
4468
4469	brelse(dx_root_bh);
4470	return ret;
4471}