/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>

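/*
 * Context carried through a copy-on-write operation: the virtual
 * cluster range being CoWed, the inode's data extent tree, the
 * refcount tree (and its root buffer), the metadata/data allocators,
 * and a dealloc context for clusters released along the way.
 */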
struct ocfs2_cow_context {
	struct inode *inode;
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree data_et;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	void *cow_object;
	struct ocfs2_post_refcount *post_refcount;
	int extra_credits;
	int (*get_clusters)(struct ocfs2_cow_context *context,
			    u32 v_cluster, u32 *p_cluster,
			    u32 *num_clusters,
			    unsigned int *extent_flags);
	int (*cow_duplicate_clusters)(handle_t *handle,
				      struct inode *inode,
				      u32 cpos, u32 old_cluster,
				      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}

static int ocfs2_validate_refcount_block(struct super_block *sb,
					 struct buffer_head *bh)
{
	int rc;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)bh->b_data;

	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has bad signature %.*s\n",
				 (unsigned long long)bh->b_blocknr, 7,
				 rb->rf_signature);
		goto out;
	}

	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
		goto out;
	}

	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
				 (unsigned long long)bh->b_blocknr,
				 le32_to_cpu(rb->rf_fs_generation));
		goto out;
	}
out:
	return rc;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
				     u64 rb_blkno,
				     struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
			      ocfs2_validate_refcount_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
	.co_owner		= ocfs2_refcount_cache_owner,
	.co_get_super		= ocfs2_refcount_cache_get_super,
	.co_cache_lock		= ocfs2_refcount_cache_lock,
	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
	.co_io_lock		= ocfs2_refcount_cache_io_lock,
	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
};

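/*
 * Look up the in-memory refcount tree rooted at blkno in the
 * per-superblock rb-tree.  Returns NULL if it isn't cached.
 * The caller must hold osb->osb_lock.
 */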
static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tree = NULL;

	while (n) {
		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

		if (blkno < tree->rf_blkno)
			n = n->rb_left;
		else if (blkno > tree->rf_blkno)
			n = n->rb_right;
		else
			return tree;
	}

	return NULL;
}

/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
				       struct ocfs2_refcount_tree *new)
{
	u64 rf_blkno = new->rf_blkno;
	struct rb_node *parent = NULL;
	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tmp;

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
			       rf_node);

		if (rf_blkno < tmp->rf_blkno)
			p = &(*p)->rb_left;
		else if (rf_blkno > tmp->rf_blkno)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
			     (unsigned long long)rf_blkno);
			BUG();
		}
	}

	rb_link_node(&new->rf_node, parent, p);
	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	ocfs2_metadata_cache_exit(&tree->rf_ci);
	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
	ocfs2_lock_res_free(&tree->rf_lockres);
	kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
		osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	spin_lock(&osb->osb_lock);
	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
	struct ocfs2_refcount_tree *tree =
		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

	ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
	kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
					       struct super_block *sb)
{
	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
	mutex_init(&new->rf_io_mutex);
	new->rf_sb = sb;
	spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *new,
					u64 rf_blkno, u32 generation)
{
	init_rwsem(&new->rf_sem);
	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
				     rf_blkno, generation);
}

static struct ocfs2_refcount_tree *
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
	struct ocfs2_refcount_tree *new;

	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
	if (!new)
		return NULL;

	new->rf_blkno = rf_blkno;
	kref_init(&new->rf_getcnt);
	ocfs2_init_refcount_tree_ci(new, osb->sb);

	return new;
}

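/*
 * Find the cached refcount tree for rf_blkno, creating and inserting
 * one if it isn't there yet.  osb_lock is dropped while the new tree
 * is allocated and its root block read, so the rb-tree is re-checked
 * before insertion; if another CPU won the race, the spare tree is
 * freed and the existing one is returned.
 */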
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
				   struct ocfs2_refcount_tree **ret_tree)
{
	int ret = 0;
	struct ocfs2_refcount_tree *tree, *new = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *ref_rb;

	spin_lock(&osb->osb_lock);
	if (osb->osb_ref_tree_lru &&
	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
		tree = osb->osb_ref_tree_lru;
	else
		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	spin_unlock(&osb->osb_lock);

	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
	if (!new) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}
	/*
	 * We need the generation to create the refcount tree lock, and since
	 * it isn't changed during tree modification, we are safe to read it
	 * here without protection.
	 * We also have to purge the cache after we create the lock, since the
	 * refcount block may contain stale data.  It can only be trusted when
	 * we hold the refcount lock.
	 */
	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_metadata_cache_exit(&new->rf_ci);
		kfree(new);
		return ret;
	}

	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
				      new->rf_generation);
	ocfs2_metadata_cache_purge(&new->rf_ci);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	ocfs2_insert_refcount_tree(osb, new);

	tree = new;
	new = NULL;

out:
	*ret_tree = tree;

	osb->osb_ref_tree_lru = tree;

	spin_unlock(&osb->osb_lock);

	if (new)
		ocfs2_free_refcount_tree(new);

	brelse(ref_root_bh);
	return ret;
}

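/*
 * Read the inode's dinode block and return the block number of its
 * refcount tree root (di->i_refcount_loc).
 */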
static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(!ocfs2_is_refcount_inode(inode));

	di = (struct ocfs2_dinode *)di_bh->b_data;
	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
	brelse(di_bh);
out:
	return ret;
}

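/*
 * Take the cluster (DLM) lock on the tree first, then the local
 * rw_semaphore that serializes users of the tree on this node.
 */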
static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
				      struct ocfs2_refcount_tree *tree, int rw)
{
	int ret;

	ret = ocfs2_refcount_lock(tree, rw);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rw)
		down_write(&tree->rf_sem);
	else
		down_read(&tree->rf_sem);

out:
	return ret;
}

/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and then read the refcount block,
 * so read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, free the old one
 * and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
			     u64 ref_blkno, int rw,
			     struct ocfs2_refcount_tree **ret_tree,
			     struct buffer_head **ref_bh)
{
	int ret, delete_tree = 0;
	struct ocfs2_refcount_tree *tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;

again:
	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ocfs2_refcount_tree_get(tree);

	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
	if (ret) {
		mlog_errno(ret);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_unlock_refcount_tree(osb, tree, rw);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	/*
	 * If the refcount block has been freed and re-created, we may need
	 * to recreate the refcount tree also.
	 *
	 * Here we just remove the tree from the rb-tree, and the last
	 * kref holder will unlock and delete this refcount_tree.
	 * Then we goto "again" and ocfs2_get_refcount_tree will create
	 * the new refcount tree for us.
	 */
	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
		if (!tree->rf_removed) {
			ocfs2_erase_refcount_tree_from_list(osb, tree);
			tree->rf_removed = 1;
			delete_tree = 1;
		}

		ocfs2_unlock_refcount_tree(osb, tree, rw);
		/*
		 * We get an extra reference when we create the refcount
		 * tree, so another put will destroy it.
		 */
		if (delete_tree)
			ocfs2_refcount_tree_put(tree);
		brelse(ref_root_bh);
		ref_root_bh = NULL;
		goto again;
	}

	*ret_tree = tree;
	if (ref_bh) {
		*ref_bh = ref_root_bh;
		ref_root_bh = NULL;
	}
out:
	brelse(ref_root_bh);
	return ret;
}

void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
				struct ocfs2_refcount_tree *tree, int rw)
{
	if (rw)
		up_write(&tree->rf_sem);
	else
		up_read(&tree->rf_sem);

	ocfs2_refcount_unlock(tree, rw);
	ocfs2_refcount_tree_put(tree);
}

void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
	struct rb_node *node;
	struct ocfs2_refcount_tree *tree;
	struct rb_root *root = &osb->osb_rf_lock_tree;

	while ((node = rb_last(root)) != NULL) {
		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		trace_ocfs2_purge_refcount_trees(
				(unsigned long long) tree->rf_blkno);

		rb_erase(&tree->rf_node, root);
		ocfs2_free_refcount_tree(tree);
	}
}

/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, first_blkno;

	BUG_ON(ocfs2_is_refcount_inode(inode));

	trace_ocfs2_create_refcount_tree(
		(unsigned long long)OCFS2_I(inode)->ip_blkno);

	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &first_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
	if (!new_tree) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}

	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (!new_bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}
	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Initialize ocfs2_refcount_block. */
	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(rb, 0, inode->i_sb->s_blocksize);
	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
	rb->rf_blkno = cpu_to_le64(first_blkno);
	rb->rf_count = cpu_to_le32(1);
	rb->rf_records.rl_count =
			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
	spin_lock(&osb->osb_lock);
	rb->rf_generation = osb->s_next_generation++;
	spin_unlock(&osb->osb_lock);

	ocfs2_journal_dirty(handle, new_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(first_blkno);
	spin_unlock(&oi->ip_lock);

	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);

	ocfs2_journal_dirty(handle, di_bh);

	/*
	 * We have to init the tree lock here since it will use
	 * the generation number to create it.
	 */
	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
				      new_tree->rf_generation);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, first_blkno);

	/*
	 * We've just created a new refcount tree in this block.  If
	 * we found a refcount tree on the ocfs2_super, it must be
	 * one we just deleted.  We free the old tree before
	 * inserting the new tree.
	 */
	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
	if (tree)
		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	ocfs2_insert_refcount_tree(osb, new_tree);
	spin_unlock(&osb->osb_lock);
	new_tree = NULL;
	if (tree)
		ocfs2_refcount_tree_put(tree);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (new_tree) {
		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
		kfree(new_tree);
	}

	brelse(new_bh);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	return ret;
}

static int ocfs2_set_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh,
				   u64 refcount_loc)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *ref_tree;

	BUG_ON(ocfs2_is_refcount_inode(inode));

	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	le32_add_cpu(&rb->rf_count, 1);

	ocfs2_journal_dirty(handle, ref_root_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(refcount_loc);
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	return ret;
}

int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
	int ret, delete_tree = 0;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct inode *alloc_inode = NULL;
	struct buffer_head *alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
	u16 bit = 0;

	if (!ocfs2_is_refcount_inode(inode))
		return 0;

	BUG_ON(!ref_blkno);
	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

	/*
	 * If we are the last user, we need to free the block.
	 * So lock the allocator ahead of time.
	 */
	if (le32_to_cpu(rb->rf_count) == 1) {
		blk = le64_to_cpu(rb->rf_blkno);
		bit = le16_to_cpu(rb->rf_suballoc_bit);
		if (rb->rf_suballoc_loc)
			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
		else
			bg_blkno = ocfs2_which_suballoc_group(blk, bit);

		alloc_inode = ocfs2_get_system_file_inode(osb,
					EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot));
		if (!alloc_inode) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}
		inode_lock(alloc_inode);

		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_mutex;
		}

		credits += OCFS2_SUBALLOC_FREE;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = 0;
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

	le32_add_cpu(&rb->rf_count, -1);
	ocfs2_journal_dirty(handle, blk_bh);

	if (!rb->rf_count) {
		delete_tree = 1;
		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
					       alloc_bh, bit, bg_blkno, 1);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	if (alloc_inode) {
		ocfs2_inode_unlock(alloc_inode, 1);
		brelse(alloc_bh);
	}
out_mutex:
	if (alloc_inode) {
		inode_unlock(alloc_inode);
		iput(alloc_inode);
	}
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	if (delete_tree)
		ocfs2_refcount_tree_put(ref_tree);
	brelse(blk_bh);

	return ret;
}

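/*
 * Scan the record list of a leaf refcount block for the record that
 * contains cpos.  If cpos falls into a hole, fabricate a record with
 * r_refcount == 0 that covers the hole, clamped to len and to the
 * start of the next real record.
 */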
static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
					  struct buffer_head *ref_leaf_bh,
					  u64 cpos, unsigned int len,
					  struct ocfs2_refcount_rec *ret_rec,
					  int *index)
{
	int i = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = NULL;

	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
		rec = &rb->rf_records.rl_recs[i];

		if (le64_to_cpu(rec->r_cpos) +
		    le32_to_cpu(rec->r_clusters) <= cpos)
			continue;
		else if (le64_to_cpu(rec->r_cpos) > cpos)
			break;

		/* ok, cpos falls in this rec. Just return. */
		if (ret_rec)
			*ret_rec = *rec;
		goto out;
	}

	if (ret_rec) {
		/* We hit a hole here, so fake the rec. */
		ret_rec->r_cpos = cpu_to_le64(cpos);
		ret_rec->r_refcount = 0;
		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
		    le64_to_cpu(rec->r_cpos) < cpos + len)
			ret_rec->r_clusters =
				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
		else
			ret_rec->r_clusters = cpu_to_le32(len);
	}

out:
	*index = i;
}

/*
 * Try to remove the refcount tree. The mechanism is:
 * 1) Check whether i_clusters == 0; if not, exit.
 * 2) Check whether we have i_xattr_loc in the dinode; if so, exit.
 * 3) Check whether we have an inline xattr stored outside; if so, exit.
 * 4) Remove the tree.
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&oi->ip_xattr_sem);
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_clusters)
		goto out;

	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
		goto out;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
	    ocfs2_has_inline_xattr_value_outside(inode, di))
		goto out;

	ret = ocfs2_remove_refcount_tree(inode, di_bh);
	if (ret)
		mlog_errno(ret);
out:
	up_write(&oi->ip_alloc_sem);
	up_write(&oi->ip_xattr_sem);
	return 0;
}

/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_extent_block *eb,
				       struct ocfs2_extent_list *el,
				       int index, u32 *cpos_end)
{
	int ret, i, subtree_root;
	u32 cpos;
	u64 blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_path *left_path = NULL, *right_path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_extent_list *tmp_el;

	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
		/*
		 * We have an extent rec after index, so just use the e_cpos
		 * of the next extent rec.
		 */
		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
		return 0;
	}

	if (!eb || !eb->h_next_leaf_blk) {
		/*
		 * We are the last extent rec, so any high cpos should
		 * be stored in this leaf refcount block.
		 */
		*cpos_end = UINT_MAX;
		return 0;
	}

	/*
	 * If the extent block isn't the last one, we have to find
	 * the subtree root between this extent block and the next
	 * leaf extent block and get the corresponding e_cpos from
	 * the subtree root.  Otherwise we may corrupt the b-tree.
	 */
	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	left_path = ocfs2_new_path_from_et(&et);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
	ret = ocfs2_find_path(ci, left_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	subtree_root = ocfs2_find_subtree_root(&et, left_path,
					       right_path);

	tmp_el = left_path->p_node[subtree_root].el;
	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
			break;
		}
	}

	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);
	return ret;
}

/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts from cpos
 *    and ends at a value between cpos+len and the start of the next record.
 *    This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos, uninitialized_var(cpos_end);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ret = ocfs2_error(sb,
					  "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
					  (unsigned long long)eb_bh->b_blocknr);
			goto out;
		}
	}

	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	if (found) {
		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
						  eb, el, i, &cpos_end);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (cpos_end < low_cpos + len)
			len = cpos_end - low_cpos;
	}

	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}

enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};

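/*
 * Two neighbouring records are adjacent when they carry the same
 * refcount and the first ends exactly where the second begins; such
 * neighbours can be merged into a single record.
 */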
static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
				    int index)
{
	if ((rb->rf_records.rl_recs[index].r_refcount ==
	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
		return REF_CONTIG_RIGHT;

	return REF_CONTIG_NONE;
}

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
				  int index)
{
	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
		ret = ocfs2_refcount_rec_adjacent(rb, index);

	if (index > 0) {
		enum ocfs2_ref_rec_contig tmp;

		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

		if (tmp == REF_CONTIG_RIGHT) {
			if (ret == REF_CONTIG_RIGHT)
				ret = REF_CONTIG_LEFTRIGHT;
			else
				ret = REF_CONTIG_LEFT;
		}
	}

	return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
				     int index)
{
	enum ocfs2_ref_rec_contig contig =
				ocfs2_refcount_rec_contig(rb, index);

	if (contig == REF_CONTIG_NONE)
		return;

	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
		BUG_ON(index == 0);
		index--;
	}

	ocfs2_rotate_refcount_rec_left(rb, index);

	if (contig == REF_CONTIG_LEFTRIGHT)
		ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_leaf_bh.
 * If the refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_leaf_bh,
				     int index, int merge, int change)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_change_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, le32_to_cpu(rec->r_refcount), change);
	le32_add_cpu(&rec->r_refcount, change);

	if (!rec->r_refcount) {
		if (index != le16_to_cpu(rl->rl_used) - 1) {
			memmove(rec, rec + 1,
				(le16_to_cpu(rl->rl_used) - index - 1) *
				sizeof(struct ocfs2_refcount_rec));
			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
			       0, sizeof(struct ocfs2_refcount_rec));
		}

		le16_add_cpu(&rl->rl_used, -1);
	} else if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
	return ret;
}

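/*
 * Convert an inline (root-only) refcount block into a one-level tree:
 * allocate a new leaf block, copy the root's contents into it, then
 * rewrite the root's record area as an extent list with a single
 * extent record pointing at the new leaf.
 */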
static int ocfs2_expand_inline_ref_root(handle_t *handle,
					struct ocfs2_caching_info *ci,
					struct buffer_head *ref_root_bh,
					struct buffer_head **ref_leaf_bh,
					struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_refcount_block *root_rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root,
	 * so just memcpy it and change the corresponding fields.
	 */
	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_cpos = cpu_to_le32(0);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	ocfs2_journal_dirty(handle, new_bh);

	/* Now change the root. */
	memset(&root_rb->rf_list, 0, sb->s_blocksize -
	       offsetof(struct ocfs2_refcount_block, rf_list));
	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
	root_rb->rf_clusters = cpu_to_le32(1);
	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

	ocfs2_journal_dirty(handle, ref_root_bh);

	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
		le16_to_cpu(new_rb->rf_records.rl_used));

	*ref_leaf_bh = new_bh;
	new_bh = NULL;
out:
	brelse(new_bh);
	return ret;
}

static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
					   struct ocfs2_refcount_rec *next)
{
	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
		ocfs2_get_ref_rec_low_cpos(next))
		return 1;

	return 0;
}

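/*
 * Comparators and swap helper for the kernel sort() from
 * <linux/sort.h>: order records either by the low 32 bits of r_cpos
 * (the key used as e_cpos in the b-tree) or by the full 64-bit r_cpos.
 */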
static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u64 l_cpos = le64_to_cpu(l->r_cpos);
	u64 r_cpos = le64_to_cpu(r->r_cpos);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static void swap_refcount_rec(void *a, void *b, int size)
{
	struct ocfs2_refcount_rec *l = a, *r = b;

	swap(*l, *r);
}

/*
 * The refcount recs are ordered by their 64-bit cpos,
 * but we will use the low 32 bits as the e_cpos in the b-tree.
 * So we need to make sure that the split pos doesn't intersect with others.
 *
 * Note: The refcount block is already sorted by the low 32-bit cpos,
 *       so just try the middle pos first, and we will exit when we find
 *       a good position.
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
					 u32 *split_pos, int *split_index)
{
	int num_used = le16_to_cpu(rl->rl_used);
	int delta, middle = num_used / 2;

	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle - delta - 1],
					&rl->rl_recs[middle - delta])) {
			*split_index = middle - delta;
			break;
		}

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == num_used)
			continue;

		/* Now try delta past middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle + delta],
					&rl->rl_recs[middle + delta + 1])) {
			*split_index = middle + delta + 1;
			break;
		}
	}

	if (delta >= middle)
		return -ENOSPC;

	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
	return 0;
}

static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
					    struct buffer_head *new_bh,
					    u32 *split_cpos)
{
	int split_index = 0, num_moved, ret;
	u32 cpos = 0;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_block *new_rb =
			(struct ocfs2_refcount_block *)new_bh->b_data;
	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;

	trace_ocfs2_divide_leaf_refcount_block(
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));

	/*
	 * XXX: Improvement later.
	 * If we know all the high 32 bits of the cpos are the same,
	 * there is no need to sort.
	 *
	 * In order to make the whole process safe, we do:
	 * 1. sort the entries by their low 32 bit cpos first so that we can
	 *    find the split cpos easily.
	 * 2. call ocfs2_insert_extent to insert the new refcount block.
	 * 3. move the refcount rec to the new block.
	 * 4. sort the entries by their 64 bit cpos.
	 * 5. dirty the new_rb and rb.
	 */
	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);

	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	new_rb->rf_cpos = cpu_to_le32(cpos);

	/* move refcount records starting from split_index to the new block. */
	num_moved = le16_to_cpu(rl->rl_used) - split_index;
	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* ok, remove the entries we just moved over to the other block. */
	memset(&rl->rl_recs[split_index], 0,
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* change old and new rl_used accordingly. */
	le16_add_cpu(&rl->rl_used, -num_moved);
	new_rl->rl_used = cpu_to_le16(num_moved);

	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	*split_cpos = cpos;
	return 0;
}

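/*
 * Allocate and initialize a fresh leaf refcount block, move roughly
 * half of the records from ref_leaf_bh into it, and insert the new
 * leaf into the refcount b-tree at the chosen split cpos.
 */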
static int ocfs2_new_leaf_refcount_block(handle_t *handle,
					 struct ocfs2_caching_info *ci,
					 struct buffer_head *ref_root_bh,
					 struct buffer_head *ref_leaf_bh,
					 struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got, new_cpos;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *root_rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_extent_tree ref_et;

	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Initialize ocfs2_refcount_block. */
	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(new_rb, 0, sb->s_blocksize);
	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	new_rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	new_rb->rf_generation = root_rb->rf_generation;

	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);
	ocfs2_journal_dirty(handle, new_bh);

	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);

	trace_ocfs2_new_leaf_refcount_block(
			(unsigned long long)new_bh->b_blocknr, new_cpos);

	/* Insert the new leaf block with the specific offset cpos. */
	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
				  1, 0, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	brelse(new_bh);
	return ret;
}

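/*
 * Grow the refcount tree by one leaf: if the root is still inline,
 * expand it into a b-tree first, then split the (old or new) leaf
 * with ocfs2_new_leaf_refcount_block().
 */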
static int ocfs2_expand_refcount_tree(handle_t *handle,
				      struct ocfs2_caching_info *ci,
				      struct buffer_head *ref_root_bh,
				      struct buffer_head *ref_leaf_bh,
				      struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct buffer_head *expand_bh = NULL;

	if (ref_root_bh == ref_leaf_bh) {
		/*
		 * The old root bh hasn't been expanded to a b-tree,
		 * so expand it first.
		 */
		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
						   &expand_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else {
		expand_bh = ref_leaf_bh;
		get_bh(expand_bh);
	}

	/* Now add a new refcount block into the tree. */
	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
					    expand_bh, meta_ac);
	if (ret)
		mlog_errno(ret);
out:
	brelse(expand_bh);
	return ret;
}

/*
 * Adjust the extent rec in the b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0,
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec)
{
	int ret = 0, i;
	u32 new_cpos, old_cpos;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct ocfs2_extent_list *el;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	old_cpos = le32_to_cpu(rb->rf_cpos);
	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
	if (old_cpos <= new_cpos)
		goto out;

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, path, old_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * 2 more credits, one for the leaf refcount block, one for
	 * the extent block that contains the extent rec.
	 */
	ret = ocfs2_extend_trans(handle, 2);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* change the leaf extent block first. */
	el = path_leaf_el(path);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
			break;

	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

	/* change the r_cpos in the leaf block. */
	rb->rf_cpos = cpu_to_le32(new_cpos);

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	ocfs2_free_path(path);
	return ret;
}

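/*
 * Insert rec into ref_leaf_bh at the given index, expanding the tree
 * first if the leaf's record list is full.  Inserting at index 0 can
 * lower the leaf's starting cpos, so the b-tree extent record is
 * adjusted afterwards via ocfs2_adjust_refcount_rec().
 */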
static int ocfs2_insert_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec,
				     int index, int merge,
				     struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	if (rf_list->rl_used == rf_list->rl_count) {
		u64 cpos = le64_to_cpu(rec->r_cpos);
		u32 len = le32_to_cpu(rec->r_clusters);

		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, NULL, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (index < le16_to_cpu(rf_list->rl_used))
		memmove(&rf_list->rl_recs[index + 1],
			&rf_list->rl_recs[index],
			(le16_to_cpu(rf_list->rl_used) - index) *
			 sizeof(struct ocfs2_refcount_rec));

	trace_ocfs2_insert_refcount_rec(
		(unsigned long long)ref_leaf_bh->b_blocknr, index,
		(unsigned long long)le64_to_cpu(rec->r_cpos),
		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));

	rf_list->rl_recs[index] = *rec;

	le16_add_cpu(&rf_list->rl_used, 1);

	if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);

	if (index == 0) {
		ret = ocfs2_adjust_refcount_rec(handle, ci,
						ref_root_bh,
						ref_leaf_bh, rec);
		if (ret)
			mlog_errno(ret);
	}
out:
	brelse(new_bh);
	return ret;
}

1821/*
1822 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1823 * This is much simple than our b-tree code.
1824 * split_rec is the new refcount rec we want to insert.
1825 * If split_rec->r_refcount > 0, we are changing the refcount(in case we
1826 * increase refcount or decrease a refcount to non-zero).
1827 * If split_rec->r_refcount == 0, we are punching a hole in current refcount
1828 * rec( in case we decrease a refcount to zero).
1829 */
1830static int ocfs2_split_refcount_rec(handle_t *handle,
1831				    struct ocfs2_caching_info *ci,
1832				    struct buffer_head *ref_root_bh,
1833				    struct buffer_head *ref_leaf_bh,
1834				    struct ocfs2_refcount_rec *split_rec,
1835				    int index, int merge,
1836				    struct ocfs2_alloc_context *meta_ac,
1837				    struct ocfs2_cached_dealloc_ctxt *dealloc)
1838{
1839	int ret, recs_need;
1840	u32 len;
1841	struct ocfs2_refcount_block *rb =
1842			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1843	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1844	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1845	struct ocfs2_refcount_rec *tail_rec = NULL;
1846	struct buffer_head *new_bh = NULL;
1847
1848	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1849
1850	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
1851		le32_to_cpu(orig_rec->r_clusters),
1852		le32_to_cpu(orig_rec->r_refcount),
1853		le64_to_cpu(split_rec->r_cpos),
1854		le32_to_cpu(split_rec->r_clusters),
1855		le32_to_cpu(split_rec->r_refcount));
1856
1857	/*
1858	 * If we just need to split the header or tail clusters,
1859	 * no more recs are needed, just split is OK.
1860	 * Otherwise we at least need one new recs.
1861	 */
1862	if (!split_rec->r_refcount &&
1863	    (split_rec->r_cpos == orig_rec->r_cpos ||
1864	     le64_to_cpu(split_rec->r_cpos) +
1865	     le32_to_cpu(split_rec->r_clusters) ==
1866	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1867		recs_need = 0;
1868	else
1869		recs_need = 1;
1870
1871	/*
1872	 * We need one more rec if we split in the middle and the new rec have
1873	 * some refcount in it.
1874	 */
1875	if (split_rec->r_refcount &&
1876	    (split_rec->r_cpos != orig_rec->r_cpos &&
1877	     le64_to_cpu(split_rec->r_cpos) +
1878	     le32_to_cpu(split_rec->r_clusters) !=
1879	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1880		recs_need++;
1881
1882	/* If the leaf block doesn't have enough records, expand it. */
1883	if (le16_to_cpu(rf_list->rl_used) + recs_need >
1884					 le16_to_cpu(rf_list->rl_count)) {
1885		struct ocfs2_refcount_rec tmp_rec;
1886		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1887		len = le32_to_cpu(orig_rec->r_clusters);
1888		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1889						 ref_leaf_bh, meta_ac);
1890		if (ret) {
1891			mlog_errno(ret);
1892			goto out;
1893		}
1894
1895		/*
1896		 * We have to re-get the record since cpos may have been
1897		 * moved to another leaf block.
1898		 */
1899		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1900					     cpos, len, &tmp_rec, &index,
1901					     &new_bh);
1902		if (ret) {
1903			mlog_errno(ret);
1904			goto out;
1905		}
1906
1907		ref_leaf_bh = new_bh;
1908		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1909		rf_list = &rb->rf_records;
1910		orig_rec = &rf_list->rl_recs[index];
1911	}
1912
1913	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1914				      OCFS2_JOURNAL_ACCESS_WRITE);
1915	if (ret) {
1916		mlog_errno(ret);
1917		goto out;
1918	}
1919
1920	/*
1921	 * We have calculated how many new records we need and stored the
1922	 * number in recs_need, so spare enough space first by moving the
1923	 * records after "index" to the end.
1924	 */
1925	if (index != le16_to_cpu(rf_list->rl_used) - 1)
1926		memmove(&rf_list->rl_recs[index + 1 + recs_need],
1927			&rf_list->rl_recs[index + 1],
1928			(le16_to_cpu(rf_list->rl_used) - index - 1) *
1929			 sizeof(struct ocfs2_refcount_rec));
1930
1931	len = (le64_to_cpu(orig_rec->r_cpos) +
1932	      le32_to_cpu(orig_rec->r_clusters)) -
1933	      (le64_to_cpu(split_rec->r_cpos) +
1934	      le32_to_cpu(split_rec->r_clusters));
1935
1936	/*
1937	 * If we have "len", then we will split off the tail and move it
1938	 * to the end of the space we have just spared.
1939	 */
1940	if (len) {
1941		tail_rec = &rf_list->rl_recs[index + recs_need];
1942
1943		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1944		le64_add_cpu(&tail_rec->r_cpos,
1945			     le32_to_cpu(tail_rec->r_clusters) - len);
1946		tail_rec->r_clusters = cpu_to_le32(len);
1947	}
1948
1949	/*
1950	 * If the split pos isn't the same as the original one, we need to
1951	 * split in the head.
1952	 *
1953	 * Note: it is possible that split_rec.r_refcount == 0,
1954	 * recs_need == 0 and len > 0, which means we just cut the head off
1955	 * orig_rec.  In that case orig_rec has already been modified above,
1956	 * so the r_cpos check alone would be misleading.
1957	 */
1958	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1959		len = le64_to_cpu(split_rec->r_cpos) -
1960		      le64_to_cpu(orig_rec->r_cpos);
1961		orig_rec->r_clusters = cpu_to_le32(len);
1962		index++;
1963	}
1964
1965	le16_add_cpu(&rf_list->rl_used, recs_need);
1966
1967	if (split_rec->r_refcount) {
1968		rf_list->rl_recs[index] = *split_rec;
1969		trace_ocfs2_split_refcount_rec_insert(
1970			(unsigned long long)ref_leaf_bh->b_blocknr, index,
1971			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
1972			le32_to_cpu(split_rec->r_clusters),
1973			le32_to_cpu(split_rec->r_refcount));
1974
1975		if (merge)
1976			ocfs2_refcount_rec_merge(rb, index);
1977	}
1978
1979	ocfs2_journal_dirty(handle, ref_leaf_bh);
1980
1981out:
1982	brelse(new_bh);
1983	return ret;
1984}
1985
1986static int __ocfs2_increase_refcount(handle_t *handle,
1987				     struct ocfs2_caching_info *ci,
1988				     struct buffer_head *ref_root_bh,
1989				     u64 cpos, u32 len, int merge,
1990				     struct ocfs2_alloc_context *meta_ac,
1991				     struct ocfs2_cached_dealloc_ctxt *dealloc)
1992{
1993	int ret = 0, index;
1994	struct buffer_head *ref_leaf_bh = NULL;
1995	struct ocfs2_refcount_rec rec;
1996	unsigned int set_len = 0;
1997
1998	trace_ocfs2_increase_refcount_begin(
1999	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
2000	     (unsigned long long)cpos, len);
2001
2002	while (len) {
2003		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2004					     cpos, len, &rec, &index,
2005					     &ref_leaf_bh);
2006		if (ret) {
2007			mlog_errno(ret);
2008			goto out;
2009		}
2010
2011		set_len = le32_to_cpu(rec.r_clusters);
2012
2013		/*
2014		 * Here we may encounter three situations:
2015		 *
2016		 * 1. If we find an existing record whose start matches cpos
2017		 *    and whose length fits within the request, we just need
2018		 *    to increase its r_refcount.
2019		 * 2. If we find a hole, just insert a rec with r_refcount = 1.
2020		 * 3. If we are in the middle of one extent record, split
2021		 *    it.
2022		 */
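		/*
		 * Worked example (illustrative only, with hypothetical
		 * values): increasing the refcount of [cpos 10, len 20]
		 * when the tree holds [r_cpos 0, r_clusters 15,
		 * r_refcount 1] followed by a hole takes two passes:
		 * the first pass splits the existing rec and bumps
		 * [10, 15) to refcount 2 (case 3), the second inserts
		 * [15, 30) with refcount 1 (case 2).
		 */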
2023		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2024		    set_len <= len) {
2025			trace_ocfs2_increase_refcount_change(
2026				(unsigned long long)cpos, set_len,
2027				le32_to_cpu(rec.r_refcount));
2028			ret = ocfs2_change_refcount_rec(handle, ci,
2029							ref_leaf_bh, index,
2030							merge, 1);
2031			if (ret) {
2032				mlog_errno(ret);
2033				goto out;
2034			}
2035		} else if (!rec.r_refcount) {
2036			rec.r_refcount = cpu_to_le32(1);
2037
2038			trace_ocfs2_increase_refcount_insert(
2039			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2040			     set_len);
2041			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
2042							ref_leaf_bh,
2043							&rec, index,
2044							merge, meta_ac);
2045			if (ret) {
2046				mlog_errno(ret);
2047				goto out;
2048			}
2049		} else  {
2050			set_len = min((u64)(cpos + len),
2051				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
2052			rec.r_cpos = cpu_to_le64(cpos);
2053			rec.r_clusters = cpu_to_le32(set_len);
2054			le32_add_cpu(&rec.r_refcount, 1);
2055
2056			trace_ocfs2_increase_refcount_split(
2057			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2058			     set_len, le32_to_cpu(rec.r_refcount));
2059			ret = ocfs2_split_refcount_rec(handle, ci,
2060						       ref_root_bh, ref_leaf_bh,
2061						       &rec, index, merge,
2062						       meta_ac, dealloc);
2063			if (ret) {
2064				mlog_errno(ret);
2065				goto out;
2066			}
2067		}
2068
2069		cpos += set_len;
2070		len -= set_len;
2071		brelse(ref_leaf_bh);
2072		ref_leaf_bh = NULL;
2073	}
2074
2075out:
2076	brelse(ref_leaf_bh);
2077	return ret;
2078}
2079
2080static int ocfs2_remove_refcount_extent(handle_t *handle,
2081				struct ocfs2_caching_info *ci,
2082				struct buffer_head *ref_root_bh,
2083				struct buffer_head *ref_leaf_bh,
2084				struct ocfs2_alloc_context *meta_ac,
2085				struct ocfs2_cached_dealloc_ctxt *dealloc)
2086{
2087	int ret;
2088	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2089	struct ocfs2_refcount_block *rb =
2090			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2091	struct ocfs2_extent_tree et;
2092
2093	BUG_ON(rb->rf_records.rl_used);
2094
2095	trace_ocfs2_remove_refcount_extent(
2096		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2097		(unsigned long long)ref_leaf_bh->b_blocknr,
2098		le32_to_cpu(rb->rf_cpos));
2099
2100	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2101	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
2102				  1, meta_ac, dealloc);
2103	if (ret) {
2104		mlog_errno(ret);
2105		goto out;
2106	}
2107
2108	ocfs2_remove_from_cache(ci, ref_leaf_bh);
2109
2110	/*
2111	 * Add the freed block to the dealloc context so that it will be
2112	 * freed when we run the deallocs.
2113	 */
2114	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
2115					le16_to_cpu(rb->rf_suballoc_slot),
2116					le64_to_cpu(rb->rf_suballoc_loc),
2117					le64_to_cpu(rb->rf_blkno),
2118					le16_to_cpu(rb->rf_suballoc_bit));
2119	if (ret) {
2120		mlog_errno(ret);
2121		goto out;
2122	}
2123
2124	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
2125				      OCFS2_JOURNAL_ACCESS_WRITE);
2126	if (ret) {
2127		mlog_errno(ret);
2128		goto out;
2129	}
2130
2131	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2132
2133	le32_add_cpu(&rb->rf_clusters, -1);
2134
2135	/*
2136	 * Check whether we need to restore the root refcount block if
2137	 * there is no leaf extent block at all.
2138	 */
2139	if (!rb->rf_list.l_next_free_rec) {
2140		BUG_ON(rb->rf_clusters);
2141
2142		trace_ocfs2_restore_refcount_block(
2143		     (unsigned long long)ref_root_bh->b_blocknr);
2144
2145		rb->rf_flags = 0;
2146		rb->rf_parent = 0;
2147		rb->rf_cpos = 0;
2148		memset(&rb->rf_records, 0, sb->s_blocksize -
2149		       offsetof(struct ocfs2_refcount_block, rf_records));
2150		rb->rf_records.rl_count =
2151				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2152	}
2153
2154	ocfs2_journal_dirty(handle, ref_root_bh);
2155
2156out:
2157	return ret;
2158}
2159
2160int ocfs2_increase_refcount(handle_t *handle,
2161			    struct ocfs2_caching_info *ci,
2162			    struct buffer_head *ref_root_bh,
2163			    u64 cpos, u32 len,
2164			    struct ocfs2_alloc_context *meta_ac,
2165			    struct ocfs2_cached_dealloc_ctxt *dealloc)
2166{
2167	return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
2168					 cpos, len, 1,
2169					 meta_ac, dealloc);
2170}
2171
2172static int ocfs2_decrease_refcount_rec(handle_t *handle,
2173				struct ocfs2_caching_info *ci,
2174				struct buffer_head *ref_root_bh,
2175				struct buffer_head *ref_leaf_bh,
2176				int index, u64 cpos, unsigned int len,
2177				struct ocfs2_alloc_context *meta_ac,
2178				struct ocfs2_cached_dealloc_ctxt *dealloc)
2179{
2180	int ret;
2181	struct ocfs2_refcount_block *rb =
2182			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2183	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2184
2185	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2186	BUG_ON(cpos + len >
2187	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2188
2189	trace_ocfs2_decrease_refcount_rec(
2190		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2191		(unsigned long long)cpos, len);
2192
2193	if (cpos == le64_to_cpu(rec->r_cpos) &&
2194	    len == le32_to_cpu(rec->r_clusters))
2195		ret = ocfs2_change_refcount_rec(handle, ci,
2196						ref_leaf_bh, index, 1, -1);
2197	else {
2198		struct ocfs2_refcount_rec split = *rec;
2199		split.r_cpos = cpu_to_le64(cpos);
2200		split.r_clusters = cpu_to_le32(len);
2201
2202		le32_add_cpu(&split.r_refcount, -1);
2203
2204		ret = ocfs2_split_refcount_rec(handle, ci,
2205					       ref_root_bh, ref_leaf_bh,
2206					       &split, index, 1,
2207					       meta_ac, dealloc);
2208	}
2209
2210	if (ret) {
2211		mlog_errno(ret);
2212		goto out;
2213	}
2214
2215	/* Remove the leaf refcount block if it contains no refcount record. */
2216	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2217		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2218						   ref_leaf_bh, meta_ac,
2219						   dealloc);
2220		if (ret)
2221			mlog_errno(ret);
2222	}
2223
2224out:
2225	return ret;
2226}
2227
2228static int __ocfs2_decrease_refcount(handle_t *handle,
2229				     struct ocfs2_caching_info *ci,
2230				     struct buffer_head *ref_root_bh,
2231				     u64 cpos, u32 len,
2232				     struct ocfs2_alloc_context *meta_ac,
2233				     struct ocfs2_cached_dealloc_ctxt *dealloc,
2234				     int delete)
2235{
2236	int ret = 0, index = 0;
2237	struct ocfs2_refcount_rec rec;
2238	unsigned int r_count = 0, r_len;
2239	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2240	struct buffer_head *ref_leaf_bh = NULL;
2241
2242	trace_ocfs2_decrease_refcount(
2243		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2244		(unsigned long long)cpos, len, delete);
2245
2246	while (len) {
2247		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2248					     cpos, len, &rec, &index,
2249					     &ref_leaf_bh);
2250		if (ret) {
2251			mlog_errno(ret);
2252			goto out;
2253		}
2254
2255		r_count = le32_to_cpu(rec.r_refcount);
2256		BUG_ON(r_count == 0);
2257		if (!delete)
2258			BUG_ON(r_count > 1);
2259
2260		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2261			      le32_to_cpu(rec.r_clusters)) - cpos;
2262
2263		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2264						  ref_leaf_bh, index,
2265						  cpos, r_len,
2266						  meta_ac, dealloc);
2267		if (ret) {
2268			mlog_errno(ret);
2269			goto out;
2270		}
2271
2272		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
2273			ret = ocfs2_cache_cluster_dealloc(dealloc,
2274					  ocfs2_clusters_to_blocks(sb, cpos),
2275							  r_len);
2276			if (ret) {
2277				mlog_errno(ret);
2278				goto out;
2279			}
2280		}
2281
2282		cpos += r_len;
2283		len -= r_len;
2284		brelse(ref_leaf_bh);
2285		ref_leaf_bh = NULL;
2286	}
2287
2288out:
2289	brelse(ref_leaf_bh);
2290	return ret;
2291}
2292
2293/* Caller must hold refcount tree lock. */
2294int ocfs2_decrease_refcount(struct inode *inode,
2295			    handle_t *handle, u32 cpos, u32 len,
2296			    struct ocfs2_alloc_context *meta_ac,
2297			    struct ocfs2_cached_dealloc_ctxt *dealloc,
2298			    int delete)
2299{
2300	int ret;
2301	u64 ref_blkno;
2302	struct buffer_head *ref_root_bh = NULL;
2303	struct ocfs2_refcount_tree *tree;
2304
2305	BUG_ON(!ocfs2_is_refcount_inode(inode));
2306
2307	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2308	if (ret) {
2309		mlog_errno(ret);
2310		goto out;
2311	}
2312
2313	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2314	if (ret) {
2315		mlog_errno(ret);
2316		goto out;
2317	}
2318
2319	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2320					&ref_root_bh);
2321	if (ret) {
2322		mlog_errno(ret);
2323		goto out;
2324	}
2325
2326	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2327					cpos, len, meta_ac, dealloc, delete);
2328	if (ret)
2329		mlog_errno(ret);
2330out:
2331	brelse(ref_root_bh);
2332	return ret;
2333}
2334
2335/*
2336 * Mark the already-existing extent at cpos as refcounted for len clusters.
2337 * This adds the refcount extent flag.
2338 *
2339 * If the existing extent is larger than the request, initiate a
2340 * split. An attempt will be made at merging with adjacent extents.
2341 *
2342 * The caller is responsible for passing down meta_ac if we'll need it.
2343 */
2344static int ocfs2_mark_extent_refcounted(struct inode *inode,
2345				struct ocfs2_extent_tree *et,
2346				handle_t *handle, u32 cpos,
2347				u32 len, u32 phys,
2348				struct ocfs2_alloc_context *meta_ac,
2349				struct ocfs2_cached_dealloc_ctxt *dealloc)
2350{
2351	int ret;
2352
2353	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
2354					   cpos, len, phys);
2355
2356	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2357		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2358				  inode->i_ino);
2359		goto out;
2360	}
2361
2362	ret = ocfs2_change_extent_flag(handle, et, cpos,
2363				       len, phys, meta_ac, dealloc,
2364				       OCFS2_EXT_REFCOUNTED, 0);
2365	if (ret)
2366		mlog_errno(ret);
2367
2368out:
2369	return ret;
2370}
2371
2372/*
2373 * Given some contiguous physical clusters, calculate what we need
2374 * for modifying their refcount.
2375 */
2376static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2377					    struct ocfs2_caching_info *ci,
2378					    struct buffer_head *ref_root_bh,
2379					    u64 start_cpos,
2380					    u32 clusters,
2381					    int *meta_add,
2382					    int *credits)
2383{
2384	int ret = 0, index, ref_blocks = 0, recs_add = 0;
2385	u64 cpos = start_cpos;
2386	struct ocfs2_refcount_block *rb;
2387	struct ocfs2_refcount_rec rec;
2388	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2389	u32 len;
2390
2391	while (clusters) {
2392		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2393					     cpos, clusters, &rec,
2394					     &index, &ref_leaf_bh);
2395		if (ret) {
2396			mlog_errno(ret);
2397			goto out;
2398		}
2399
2400		if (ref_leaf_bh != prev_bh) {
2401			/*
2402			 * We have encountered a new leaf block, so calculate
2403			 * whether we need to expand the previous leaf.
2404			 */
2405			if (prev_bh) {
2406				rb = (struct ocfs2_refcount_block *)
2407							prev_bh->b_data;
2408
2409				if (le16_to_cpu(rb->rf_records.rl_used) +
2410				    recs_add >
2411				    le16_to_cpu(rb->rf_records.rl_count))
2412					ref_blocks++;
2413			}
2414
2415			recs_add = 0;
2416			*credits += 1;
2417			brelse(prev_bh);
2418			prev_bh = ref_leaf_bh;
2419			get_bh(prev_bh);
2420		}
2421
2422		trace_ocfs2_calc_refcount_meta_credits_iterate(
2423				recs_add, (unsigned long long)cpos, clusters,
2424				(unsigned long long)le64_to_cpu(rec.r_cpos),
2425				le32_to_cpu(rec.r_clusters),
2426				le32_to_cpu(rec.r_refcount), index);
2427
2428		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2429			  le32_to_cpu(rec.r_clusters)) - cpos;
2430		/*
2431		 * We count all the records which will be inserted into the
2432		 * same refcount block, so that we can tell exactly whether
2433		 * we need a new refcount block or not.
2434		 *
2435		 * If we will insert a new one, this is easy and only happens
2436		 * while adding the refcounted flag to an extent, so we don't
2437		 * have a chance of splitting.  We just need one record.
2438		 *
2439		 * If the refcount rec already exists, things are a little
2440		 * more complicated.  We may have to:
2441		 * 1) split at the beginning if the start pos isn't aligned;
2442		 *    we need 1 more record in this case.
2443		 * 2) split at the end if the end pos isn't aligned;
2444		 *    we need 1 more record in this case.
2445		 * 3) split in the middle because of file system fragmentation;
2446		 *    we need 2 more records in this case (we can't detect this
2447		 *    beforehand, so always assume the worst case).
2448		 */
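		/*
		 * Worked example (illustrative only, with hypothetical
		 * values): for rec = [r_cpos 0, r_clusters 100,
		 * r_refcount 2] and a request covering start_cpos 10,
		 * clusters 10, we budget the base 2 records plus 1 for
		 * the unaligned start and 1 for the unaligned end, so
		 * recs_add grows by 4.
		 */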
2449		if (rec.r_refcount) {
2450			recs_add += 2;
2451			/* Check whether we need a split at the beginning. */
2452			if (cpos == start_cpos &&
2453			    cpos != le64_to_cpu(rec.r_cpos))
2454				recs_add++;
2455
2456			/* Check whether we need a split in the end. */
2457			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2458			    le32_to_cpu(rec.r_clusters))
2459				recs_add++;
2460		} else
2461			recs_add++;
2462
2463		brelse(ref_leaf_bh);
2464		ref_leaf_bh = NULL;
2465		clusters -= len;
2466		cpos += len;
2467	}
2468
2469	if (prev_bh) {
2470		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2471
2472		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2473		    le16_to_cpu(rb->rf_records.rl_count))
2474			ref_blocks++;
2475
2476		*credits += 1;
2477	}
2478
2479	if (!ref_blocks)
2480		goto out;
2481
2482	*meta_add += ref_blocks;
2483	*credits += ref_blocks;
2484
2485	/*
2486	 * So we may need ref_blocks to be inserted into the tree.
2487	 * That also means we need to change the b-tree and add that number
2488	 * of records, since we never merge them.
2489	 * We need one more block for expansion in case the newly created
2490	 * leaf block is itself full and needs to be split.
2491	 */
2492	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2493	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2494		struct ocfs2_extent_tree et;
2495
2496		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2497		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2498		*credits += ocfs2_calc_extend_credits(sb,
2499						      et.et_root_el);
2500	} else {
2501		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2502		*meta_add += 1;
2503	}
2504
2505out:
2506
2507	trace_ocfs2_calc_refcount_meta_credits(
2508		(unsigned long long)start_cpos, clusters,
2509		*meta_add, *credits);
2510	brelse(ref_leaf_bh);
2511	brelse(prev_bh);
2512	return ret;
2513}
2514
2515/*
2516 * For a refcount tree, we will decrease the refcount of some
2517 * contiguous clusters, so just go through them to see how many
2518 * blocks we are going to touch and whether we need to create new blocks.
2519 *
2520 * Normally the refcount blocks storing these refcounts should be
2521 * contiguous as well, so we can get the number easily.
2522 * We will split at most 2 refcount records and add at most 2 more
2523 * refcount blocks, so a rough check is enough.
2524 *
2525 * Caller must hold refcount tree lock.
2526 */
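/*
 * A minimal caller sketch (illustrative only; the surrounding truncate
 * logic is elided and the local names are hypothetical):
 *
 *	int credits = 0, ref_blocks = 0;
 *	struct ocfs2_alloc_context *meta_ac = NULL;
 *
 *	ret = ocfs2_prepare_refcount_change_for_del(inode, refcount_loc,
 *						    phys_blkno, clusters,
 *						    &credits, &ref_blocks);
 *	if (!ret && ref_blocks)
 *		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
 *							ref_blocks, &meta_ac);
 *
 * A transaction started with "credits" can then cover the later
 * ocfs2_decrease_refcount() call.
 */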
2527int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2528					  u64 refcount_loc,
2529					  u64 phys_blkno,
2530					  u32 clusters,
2531					  int *credits,
2532					  int *ref_blocks)
2533{
2534	int ret;
2535	struct buffer_head *ref_root_bh = NULL;
2536	struct ocfs2_refcount_tree *tree;
2537	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2538
2539	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2540		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2541				  inode->i_ino);
2542		goto out;
2543	}
2544
2545	BUG_ON(!ocfs2_is_refcount_inode(inode));
2546
2547	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2548				      refcount_loc, &tree);
2549	if (ret) {
2550		mlog_errno(ret);
2551		goto out;
2552	}
2553
2554	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
2555					&ref_root_bh);
2556	if (ret) {
2557		mlog_errno(ret);
2558		goto out;
2559	}
2560
2561	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2562					       &tree->rf_ci,
2563					       ref_root_bh,
2564					       start_cpos, clusters,
2565					       ref_blocks, credits);
2566	if (ret) {
2567		mlog_errno(ret);
2568		goto out;
2569	}
2570
2571	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
2572
2573out:
2574	brelse(ref_root_bh);
2575	return ret;
2576}
2577
2578#define	MAX_CONTIG_BYTES	1048576
2579
2580static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2581{
2582	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2583}
2584
2585static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2586{
2587	return ~(ocfs2_cow_contig_clusters(sb) - 1);
2588}
2589
2590/*
2591 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2592 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2593 * while still being less than or equal to it.
2594 *
2595 * The goal is to break the extent at a multiple of contig_clusters.
2596 */
2597static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2598						 unsigned int start,
2599						 unsigned int cpos)
2600{
2601	BUG_ON(start > cpos);
2602
2603	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
2604}
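
/*
 * Example (illustrative, assuming 4K clusters so that contig_clusters
 * is 256): ocfs2_cow_align_start(sb, 1000, 1500) returns
 * 1000 + ((1500 - 1000) & ~255) = 1256, the last 256-cluster step from
 * 'start' that does not pass 'cpos'.
 */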
2605
2606/*
2607 * Given a cluster count of len, pad it out so that it is a multiple
2608 * of contig_clusters.
2609 */
2610static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2611						  unsigned int len)
2612{
2613	unsigned int padded =
2614		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2615		ocfs2_cow_contig_mask(sb);
2616
2617	/* Did we wrap? */
2618	if (padded < len)
2619		padded = UINT_MAX;
2620
2621	return padded;
2622}
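
/*
 * Example (illustrative, again assuming 256 contig clusters): a len of
 * 300 pads to (300 + 255) & ~255 = 512, and a len large enough to wrap
 * the unsigned arithmetic is clamped to UINT_MAX by the check above.
 */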
2623
2624/*
2625 * Calculate the start and the number of virtual clusters we need to CoW.
2626 *
2627 * cpos is the virtual start cluster position where we want to do CoW
2628 * in a file, and write_len is the cluster length.
2629 * max_cpos is the place where we intentionally want to stop the CoW.
2630 *
2631 * Normally we start the CoW from the beginning of the extent record containing cpos.
2632 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
2633 * get good I/O from the resulting extent tree.
2634 */
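/*
 * Example (illustrative, assuming 256 contig clusters): a 1-cluster
 * write at cpos 100 into a fully refcounted extent [0, 1000) is not
 * CoWed cluster by cluster; the loop below chooses cow_start 0 and
 * cow_len 256, breaking the extent on a contig_clusters boundary.
 */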
2635static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2636					   struct ocfs2_extent_list *el,
2637					   u32 cpos,
2638					   u32 write_len,
2639					   u32 max_cpos,
2640					   u32 *cow_start,
2641					   u32 *cow_len)
2642{
2643	int ret = 0;
2644	int tree_height = le16_to_cpu(el->l_tree_depth), i;
2645	struct buffer_head *eb_bh = NULL;
2646	struct ocfs2_extent_block *eb = NULL;
2647	struct ocfs2_extent_rec *rec;
2648	unsigned int want_clusters, rec_end = 0;
2649	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2650	int leaf_clusters;
2651
2652	BUG_ON(cpos + write_len > max_cpos);
2653
2654	if (tree_height > 0) {
2655		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2656		if (ret) {
2657			mlog_errno(ret);
2658			goto out;
2659		}
2660
2661		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2662		el = &eb->h_list;
2663
2664		if (el->l_tree_depth) {
2665			ret = ocfs2_error(inode->i_sb,
2666					  "Inode %lu has non zero tree depth in leaf block %llu\n",
2667					  inode->i_ino,
2668					  (unsigned long long)eb_bh->b_blocknr);
2669			goto out;
2670		}
2671	}
2672
2673	*cow_len = 0;
2674	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2675		rec = &el->l_recs[i];
2676
2677		if (ocfs2_is_empty_extent(rec)) {
2678			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
2679					"index %d\n", inode->i_ino, i);
2680			continue;
2681		}
2682
2683		if (le32_to_cpu(rec->e_cpos) +
2684		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2685			continue;
2686
2687		if (*cow_len == 0) {
2688			/*
2689			 * We should find a refcounted record in the
2690			 * first pass.
2691			 */
2692			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2693			*cow_start = le32_to_cpu(rec->e_cpos);
2694		}
2695
2696		/*
2697		 * If we encounter a hole, a non-refcounted record or
2698		 * pass the max_cpos, stop the search.
2699		 */
2700		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2701		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
2702		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
2703			break;
2704
2705		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2706		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2707		if (rec_end > max_cpos) {
2708			rec_end = max_cpos;
2709			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
2710		}
2711
2712		/*
2713		 * How many clusters do we actually need from
2714		 * this extent?  First we see how many we actually
2715		 * need to complete the write.  If that's smaller
2716		 * than contig_clusters, we try for contig_clusters.
2717		 */
2718		if (!*cow_len)
2719			want_clusters = write_len;
2720		else
2721			want_clusters = (cpos + write_len) -
2722				(*cow_start + *cow_len);
2723		if (want_clusters < contig_clusters)
2724			want_clusters = contig_clusters;
2725
2726		/*
2727		 * If the write does not cover the whole extent, we
2728		 * need to calculate how we're going to split the extent.
2729		 * We try to do it on contig_clusters boundaries.
2730		 *
2731		 * Any extent smaller than contig_clusters will be
2732		 * CoWed in its entirety.
2733		 */
2734		if (leaf_clusters <= contig_clusters)
2735			*cow_len += leaf_clusters;
2736		else if (*cow_len || (*cow_start == cpos)) {
2737			/*
2738			 * This extent needs to be CoW'd from its
2739			 * beginning, so all we have to do is compute
2740			 * how many clusters to grab.  We align
2741			 * want_clusters to the edge of contig_clusters
2742			 * to get better I/O.
2743			 */
2744			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2745							       want_clusters);
2746
2747			if (leaf_clusters < want_clusters)
2748				*cow_len += leaf_clusters;
2749			else
2750				*cow_len += want_clusters;
2751		} else if ((*cow_start + contig_clusters) >=
2752			   (cpos + write_len)) {
2753			/*
2754			 * Breaking off contig_clusters at the front
2755			 * of the extent will cover our write.  That's
2756			 * easy.
2757			 */
2758			*cow_len = contig_clusters;
2759		} else if ((rec_end - cpos) <= contig_clusters) {
2760			/*
2761			 * Breaking off contig_clusters at the tail of
2762			 * this extent will cover cpos.
2763			 */
2764			*cow_start = rec_end - contig_clusters;
2765			*cow_len = contig_clusters;
2766		} else if ((rec_end - cpos) <= want_clusters) {
2767			/*
2768			 * While we can't fit the entire write in this
2769			 * extent, we know that the write goes from cpos
2770			 * to the end of the extent.  Break that off.
2771			 * We try to break it at some multiple of
2772			 * contig_clusters from the front of the extent.
2773			 * Failing that (ie, cpos is within
2774			 * contig_clusters of the front), we'll CoW the
2775			 * entire extent.
2776			 */
2777			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2778							   *cow_start, cpos);
2779			*cow_len = rec_end - *cow_start;
2780		} else {
2781			/*
2782			 * Ok, the entire write lives in the middle of
2783			 * this extent.  Let's try to slice the extent up
2784			 * nicely.  Optimally, our CoW region starts at
2785			 * m*contig_clusters from the beginning of the
2786			 * extent and goes for n*contig_clusters,
2787			 * covering the entire write.
2788			 */
2789			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2790							   *cow_start, cpos);
2791
2792			want_clusters = (cpos + write_len) - *cow_start;
2793			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2794							       want_clusters);
2795			if (*cow_start + want_clusters <= rec_end)
2796				*cow_len = want_clusters;
2797			else
2798				*cow_len = rec_end - *cow_start;
2799		}
2800
2801		/* Have we covered our entire write yet? */
2802		if ((*cow_start + *cow_len) >= (cpos + write_len))
2803			break;
2804
2805		/*
2806		 * If we reach the end of the extent block and don't get enough
2807		 * clusters, continue with the next extent block if possible.
2808		 */
2809		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2810		    eb && eb->h_next_leaf_blk) {
2811			brelse(eb_bh);
2812			eb_bh = NULL;
2813
2814			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2815					       le64_to_cpu(eb->h_next_leaf_blk),
2816					       &eb_bh);
2817			if (ret) {
2818				mlog_errno(ret);
2819				goto out;
2820			}
2821
2822			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2823			el = &eb->h_list;
2824			i = -1;
2825		}
2826	}
2827
2828out:
2829	brelse(eb_bh);
2830	return ret;
2831}
2832
2833/*
2834 * Prepare meta_ac, data_ac and calculate credits when we want to add some
2835 * num_clusters in data_tree "et" and change the refcount for the old
2836 * clusters(starting form p_cluster) in the refcount tree.
2837 *
2838 * Note:
2839 * 1. since we may split the old tree, so we at most will need num_clusters + 2
2840 *    more new leaf records.
2841 * 2. In some case, we may not need to reserve new clusters(e.g, reflink), so
2842 *    just give data_ac = NULL.
2843 */
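/*
 * A minimal caller sketch (illustrative only; it mirrors how
 * ocfs2_make_clusters_writable() below drives this helper, with error
 * handling elided):
 *
 *	int credits = 0;
 *	struct ocfs2_alloc_context *meta_ac = NULL, *data_ac = NULL;
 *
 *	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
 *					     et, ref_ci, ref_root_bh,
 *					     &meta_ac, &data_ac, &credits);
 *	handle = ocfs2_start_trans(OCFS2_SB(sb), credits);
 *
 * Both allocation contexts must be released with
 * ocfs2_free_alloc_context() after the transaction is committed.
 */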
2844static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2845					u32 p_cluster, u32 num_clusters,
2846					struct ocfs2_extent_tree *et,
2847					struct ocfs2_caching_info *ref_ci,
2848					struct buffer_head *ref_root_bh,
2849					struct ocfs2_alloc_context **meta_ac,
2850					struct ocfs2_alloc_context **data_ac,
2851					int *credits)
2852{
2853	int ret = 0, meta_add = 0;
2854	int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
2855
2856	if (num_free_extents < 0) {
2857		ret = num_free_extents;
2858		mlog_errno(ret);
2859		goto out;
2860	}
2861
2862	if (num_free_extents < num_clusters + 2)
2863		meta_add =
2864			ocfs2_extend_meta_needed(et->et_root_el);
2865
2866	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
2867
2868	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2869					       p_cluster, num_clusters,
2870					       &meta_add, credits);
2871	if (ret) {
2872		mlog_errno(ret);
2873		goto out;
2874	}
2875
2876	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
2877	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2878						meta_ac);
2879	if (ret) {
2880		mlog_errno(ret);
2881		goto out;
2882	}
2883
2884	if (data_ac) {
2885		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2886					     data_ac);
2887		if (ret)
2888			mlog_errno(ret);
2889	}
2890
2891out:
2892	if (ret) {
2893		if (*meta_ac) {
2894			ocfs2_free_alloc_context(*meta_ac);
2895			*meta_ac = NULL;
2896		}
2897	}
2898
2899	return ret;
2900}
2901
2902static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2903{
2904	BUG_ON(buffer_dirty(bh));
2905
2906	clear_buffer_mapped(bh);
2907
2908	return 0;
2909}
2910
2911int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2912				     struct inode *inode,
2913				     u32 cpos, u32 old_cluster,
2914				     u32 new_cluster, u32 new_len)
2915{
2916	int ret = 0, partial;
2917	struct super_block *sb = inode->i_sb;
2918	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2919	struct page *page;
2920	pgoff_t page_index;
2921	unsigned int from, to;
2922	loff_t offset, end, map_end;
2923	struct address_space *mapping = inode->i_mapping;
2924
2925	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2926					       new_cluster, new_len);
2927
2928	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2929	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
2930	/*
2931	 * We only duplicate pages until we reach the page that contains
2932	 * i_size - 1.  So trim 'end' to i_size.
2933	 */
2934	if (end > i_size_read(inode))
2935		end = i_size_read(inode);
2936
2937	while (offset < end) {
2938		page_index = offset >> PAGE_SHIFT;
2939		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
2940		if (map_end > end)
2941			map_end = end;
2942
2943		/* from and to are offsets within the page. */
2944		from = offset & (PAGE_SIZE - 1);
2945		to = PAGE_SIZE;
2946		if (map_end & (PAGE_SIZE - 1))
2947			to = map_end & (PAGE_SIZE - 1);
2948
2949		page = find_or_create_page(mapping, page_index, GFP_NOFS);
2950		if (!page) {
2951			ret = -ENOMEM;
2952			mlog_errno(ret);
2953			break;
2954		}
2955
2956		/*
2957		 * In the case PAGE_SIZE <= CLUSTER_SIZE, this page
2958		 * can't be dirtied before we CoW it out.
2959		 */
2960		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2961			BUG_ON(PageDirty(page));
2962
2963		if (!PageUptodate(page)) {
2964			ret = block_read_full_page(page, ocfs2_get_block);
2965			if (ret) {
2966				mlog_errno(ret);
2967				goto unlock;
2968			}
2969			lock_page(page);
2970		}
2971
2972		if (page_has_buffers(page)) {
2973			ret = walk_page_buffers(handle, page_buffers(page),
2974						from, to, &partial,
2975						ocfs2_clear_cow_buffer);
2976			if (ret) {
2977				mlog_errno(ret);
2978				goto unlock;
2979			}
2980		}
2981
2982		ocfs2_map_and_dirty_page(inode,
2983					 handle, from, to,
2984					 page, 0, &new_block);
2985		mark_page_accessed(page);
2986unlock:
2987		unlock_page(page);
2988		put_page(page);
2989		page = NULL;
2990		offset = map_end;
2991		if (ret)
2992			break;
2993	}
2994
2995	return ret;
2996}
2997
2998int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
2999				    struct inode *inode,
3000				    u32 cpos, u32 old_cluster,
3001				    u32 new_cluster, u32 new_len)
3002{
3003	int ret = 0;
3004	struct super_block *sb = inode->i_sb;
3005	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3006	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
3007	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
3008	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
3009	struct ocfs2_super *osb = OCFS2_SB(sb);
3010	struct buffer_head *old_bh = NULL;
3011	struct buffer_head *new_bh = NULL;
3012
3013	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3014					       new_cluster, new_len);
3015
3016	for (i = 0; i < blocks; i++, old_block++, new_block++) {
3017		new_bh = sb_getblk(osb->sb, new_block);
3018		if (new_bh == NULL) {
3019			ret = -ENOMEM;
3020			mlog_errno(ret);
3021			break;
3022		}
3023
3024		ocfs2_set_new_buffer_uptodate(ci, new_bh);
3025
3026		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
3027		if (ret) {
3028			mlog_errno(ret);
3029			break;
3030		}
3031
3032		ret = ocfs2_journal_access(handle, ci, new_bh,
3033					   OCFS2_JOURNAL_ACCESS_CREATE);
3034		if (ret) {
3035			mlog_errno(ret);
3036			break;
3037		}
3038
3039		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
3040		ocfs2_journal_dirty(handle, new_bh);
3041
3042		brelse(new_bh);
3043		brelse(old_bh);
3044		new_bh = NULL;
3045		old_bh = NULL;
3046	}
3047
3048	brelse(new_bh);
3049	brelse(old_bh);
3050	return ret;
3051}
3052
3053static int ocfs2_clear_ext_refcount(handle_t *handle,
3054				    struct ocfs2_extent_tree *et,
3055				    u32 cpos, u32 p_cluster, u32 len,
3056				    unsigned int ext_flags,
3057				    struct ocfs2_alloc_context *meta_ac,
3058				    struct ocfs2_cached_dealloc_ctxt *dealloc)
3059{
3060	int ret, index;
3061	struct ocfs2_extent_rec replace_rec;
3062	struct ocfs2_path *path = NULL;
3063	struct ocfs2_extent_list *el;
3064	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
3065	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
3066
3067	trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
3068				       cpos, len, p_cluster, ext_flags);
3069
3070	memset(&replace_rec, 0, sizeof(replace_rec));
3071	replace_rec.e_cpos = cpu_to_le32(cpos);
3072	replace_rec.e_leaf_clusters = cpu_to_le16(len);
3073	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
3074								   p_cluster));
3075	replace_rec.e_flags = ext_flags;
3076	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
3077
3078	path = ocfs2_new_path_from_et(et);
3079	if (!path) {
3080		ret = -ENOMEM;
3081		mlog_errno(ret);
3082		goto out;
3083	}
3084
3085	ret = ocfs2_find_path(et->et_ci, path, cpos);
3086	if (ret) {
3087		mlog_errno(ret);
3088		goto out;
3089	}
3090
3091	el = path_leaf_el(path);
3092
3093	index = ocfs2_search_extent_list(el, cpos);
3094	if (index == -1) {
3095		ret = ocfs2_error(sb,
3096				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
3097				  (unsigned long long)ino, cpos);
3098		goto out;
3099	}
3100
3101	ret = ocfs2_split_extent(handle, et, path, index,
3102				 &replace_rec, meta_ac, dealloc);
3103	if (ret)
3104		mlog_errno(ret);
3105
3106out:
3107	ocfs2_free_path(path);
3108	return ret;
3109}
3110
3111static int ocfs2_replace_clusters(handle_t *handle,
3112				  struct ocfs2_cow_context *context,
3113				  u32 cpos, u32 old,
3114				  u32 new, u32 len,
3115				  unsigned int ext_flags)
3116{
3117	int ret;
3118	struct ocfs2_caching_info *ci = context->data_et.et_ci;
3119	u64 ino = ocfs2_metadata_cache_owner(ci);
3120
3121	trace_ocfs2_replace_clusters((unsigned long long)ino,
3122				     cpos, old, new, len, ext_flags);
3123
3124	/* If the old clusters are unwritten, there is no need to duplicate. */
3125	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3126		ret = context->cow_duplicate_clusters(handle, context->inode,
3127						      cpos, old, new, len);
3128		if (ret) {
3129			mlog_errno(ret);
3130			goto out;
3131		}
3132	}
3133
3134	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
3135				       cpos, new, len, ext_flags,
3136				       context->meta_ac, &context->dealloc);
3137	if (ret)
3138		mlog_errno(ret);
3139out:
3140	return ret;
3141}
3142
3143int ocfs2_cow_sync_writeback(struct super_block *sb,
3144			     struct inode *inode,
3145			     u32 cpos, u32 num_clusters)
3146{
3147	int ret = 0;
3148	loff_t offset, end, map_end;
3149	pgoff_t page_index;
3150	struct page *page;
3151
3152	if (ocfs2_should_order_data(inode))
3153		return 0;
3154
3155	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3156	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
3157
3158	ret = filemap_fdatawrite_range(inode->i_mapping,
3159				       offset, end - 1);
3160	if (ret < 0) {
3161		mlog_errno(ret);
3162		return ret;
3163	}
3164
3165	while (offset < end) {
3166		page_index = offset >> PAGE_SHIFT;
3167		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
3168		if (map_end > end)
3169			map_end = end;
3170
3171		page = find_or_create_page(inode->i_mapping,
3172					   page_index, GFP_NOFS);
3173		BUG_ON(!page);
3174
3175		wait_on_page_writeback(page);
3176		if (PageError(page)) {
3177			ret = -EIO;
3178			mlog_errno(ret);
3179		} else
3180			mark_page_accessed(page);
3181
3182		unlock_page(page);
3183		put_page(page);
3184		page = NULL;
3185		offset = map_end;
3186		if (ret)
3187			break;
3188	}
3189
3190	return ret;
3191}
3192
3193static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
3194				 u32 v_cluster, u32 *p_cluster,
3195				 u32 *num_clusters,
3196				 unsigned int *extent_flags)
3197{
3198	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
3199				  num_clusters, extent_flags);
3200}
3201
3202static int ocfs2_make_clusters_writable(struct super_block *sb,
3203					struct ocfs2_cow_context *context,
3204					u32 cpos, u32 p_cluster,
3205					u32 num_clusters, unsigned int e_flags)
3206{
3207	int ret, delete, index, credits =  0;
3208	u32 new_bit, new_len, orig_num_clusters;
3209	unsigned int set_len;
3210	struct ocfs2_super *osb = OCFS2_SB(sb);
3211	handle_t *handle;
3212	struct buffer_head *ref_leaf_bh = NULL;
3213	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3214	struct ocfs2_refcount_rec rec;
3215
3216	trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3217					   num_clusters, e_flags);
3218
3219	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3220					     &context->data_et,
3221					     ref_ci,
3222					     context->ref_root_bh,
3223					     &context->meta_ac,
3224					     &context->data_ac, &credits);
3225	if (ret) {
3226		mlog_errno(ret);
3227		return ret;
3228	}
3229
3230	if (context->post_refcount)
3231		credits += context->post_refcount->credits;
3232
3233	credits += context->extra_credits;
3234	handle = ocfs2_start_trans(osb, credits);
3235	if (IS_ERR(handle)) {
3236		ret = PTR_ERR(handle);
3237		mlog_errno(ret);
3238		goto out;
3239	}
3240
3241	orig_num_clusters = num_clusters;
3242
3243	while (num_clusters) {
3244		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3245					     p_cluster, num_clusters,
3246					     &rec, &index, &ref_leaf_bh);
3247		if (ret) {
3248			mlog_errno(ret);
3249			goto out_commit;
3250		}
3251
3252		BUG_ON(!rec.r_refcount);
3253		set_len = min((u64)p_cluster + num_clusters,
3254			      le64_to_cpu(rec.r_cpos) +
3255			      le32_to_cpu(rec.r_clusters)) - p_cluster;
3256
3257		/*
3258		 * There are two different situations here:
3259		 * 1. If refcount == 1, remove the flag and don't CoW.
3260		 * 2. If refcount > 1, allocate clusters.
3261		 *    We may not be able to allocate the full set_len at
3262		 *    once, so continue until we reach num_clusters.
3263		 */
3264		if (le32_to_cpu(rec.r_refcount) == 1) {
3265			delete = 0;
3266			ret = ocfs2_clear_ext_refcount(handle,
3267						       &context->data_et,
3268						       cpos, p_cluster,
3269						       set_len, e_flags,
3270						       context->meta_ac,
3271						       &context->dealloc);
3272			if (ret) {
3273				mlog_errno(ret);
3274				goto out_commit;
3275			}
3276		} else {
3277			delete = 1;
3278
3279			ret = __ocfs2_claim_clusters(handle,
3280						     context->data_ac,
3281						     1, set_len,
3282						     &new_bit, &new_len);
3283			if (ret) {
3284				mlog_errno(ret);
3285				goto out_commit;
3286			}
3287
3288			ret = ocfs2_replace_clusters(handle, context,
3289						     cpos, p_cluster, new_bit,
3290						     new_len, e_flags);
3291			if (ret) {
3292				mlog_errno(ret);
3293				goto out_commit;
3294			}
3295			set_len = new_len;
3296		}
3297
3298		ret = __ocfs2_decrease_refcount(handle, ref_ci,
3299						context->ref_root_bh,
3300						p_cluster, set_len,
3301						context->meta_ac,
3302						&context->dealloc, delete);
3303		if (ret) {
3304			mlog_errno(ret);
3305			goto out_commit;
3306		}
3307
3308		cpos += set_len;
3309		p_cluster += set_len;
3310		num_clusters -= set_len;
3311		brelse(ref_leaf_bh);
3312		ref_leaf_bh = NULL;
3313	}
3314
3315	/* handle any post_cow action. */
3316	if (context->post_refcount && context->post_refcount->func) {
3317		ret = context->post_refcount->func(context->inode, handle,
3318						context->post_refcount->para);
3319		if (ret) {
3320			mlog_errno(ret);
3321			goto out_commit;
3322		}
3323	}
3324
3325	/*
3326	 * Here we should write the new page out first if we are
3327	 * in write-back mode.
3328	 */
3329	if (context->get_clusters == ocfs2_di_get_clusters) {
3330		ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3331					       orig_num_clusters);
3332		if (ret)
3333			mlog_errno(ret);
3334	}
3335
3336out_commit:
3337	ocfs2_commit_trans(osb, handle);
3338
3339out:
3340	if (context->data_ac) {
3341		ocfs2_free_alloc_context(context->data_ac);
3342		context->data_ac = NULL;
3343	}
3344	if (context->meta_ac) {
3345		ocfs2_free_alloc_context(context->meta_ac);
3346		context->meta_ac = NULL;
3347	}
3348	brelse(ref_leaf_bh);
3349
3350	return ret;
3351}
3352
3353static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3354{
3355	int ret = 0;
3356	struct inode *inode = context->inode;
3357	u32 cow_start = context->cow_start, cow_len = context->cow_len;
3358	u32 p_cluster, num_clusters;
3359	unsigned int ext_flags;
3360	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3361
3362	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
3363		return ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
3364				   inode->i_ino);
3365	}
3366
3367	ocfs2_init_dealloc_ctxt(&context->dealloc);
3368
3369	while (cow_len) {
3370		ret = context->get_clusters(context, cow_start, &p_cluster,
3371					    &num_clusters, &ext_flags);
3372		if (ret) {
3373			mlog_errno(ret);
3374			break;
3375		}
3376
3377		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3378
3379		if (cow_len < num_clusters)
3380			num_clusters = cow_len;
3381
3382		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3383						   cow_start, p_cluster,
3384						   num_clusters, ext_flags);
3385		if (ret) {
3386			mlog_errno(ret);
3387			break;
3388		}
3389
3390		cow_len -= num_clusters;
3391		cow_start += num_clusters;
3392	}
3393
3394	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3395		ocfs2_schedule_truncate_log_flush(osb, 1);
3396		ocfs2_run_deallocs(osb, &context->dealloc);
3397	}
3398
3399	return ret;
3400}
3401
3402/*
3403 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
3404 * past max_cpos.  This will stop when it runs into a hole or an
3405 * unrefcounted extent.
3406 */
3407static int ocfs2_refcount_cow_hunk(struct inode *inode,
3408				   struct buffer_head *di_bh,
3409				   u32 cpos, u32 write_len, u32 max_cpos)
3410{
3411	int ret;
3412	u32 cow_start = 0, cow_len = 0;
3413	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3414	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3415	struct buffer_head *ref_root_bh = NULL;
3416	struct ocfs2_refcount_tree *ref_tree;
3417	struct ocfs2_cow_context *context = NULL;
3418
3419	BUG_ON(!ocfs2_is_refcount_inode(inode));
3420
3421	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
3422					      cpos, write_len, max_cpos,
3423					      &cow_start, &cow_len);
3424	if (ret) {
3425		mlog_errno(ret);
3426		goto out;
3427	}
3428
3429	trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
3430				      cpos, write_len, max_cpos,
3431				      cow_start, cow_len);
3432
3433	BUG_ON(cow_len == 0);
3434
3435	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3436	if (!context) {
3437		ret = -ENOMEM;
3438		mlog_errno(ret);
3439		goto out;
3440	}
3441
3442	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3443				       1, &ref_tree, &ref_root_bh);
3444	if (ret) {
3445		mlog_errno(ret);
3446		goto out;
3447	}
3448
3449	context->inode = inode;
3450	context->cow_start = cow_start;
3451	context->cow_len = cow_len;
3452	context->ref_tree = ref_tree;
3453	context->ref_root_bh = ref_root_bh;
3454	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3455	context->get_clusters = ocfs2_di_get_clusters;
3456
3457	ocfs2_init_dinode_extent_tree(&context->data_et,
3458				      INODE_CACHE(inode), di_bh);
3459
3460	ret = ocfs2_replace_cow(context);
3461	if (ret)
3462		mlog_errno(ret);
3463
3464	/*
3465	 * Truncate the extent map here: no matter whether we hit an error
3466	 * during the operation, we shouldn't trust the cached extent map
3467	 * any more.
3468	 */
3469	ocfs2_extent_map_trunc(inode, cow_start);
3470
3471	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3472	brelse(ref_root_bh);
3473out:
3474	kfree(context);
3475	return ret;
3476}
3477
3478/*
3479 * CoW any and all clusters between cpos and cpos+write_len.
3480 * Don't CoW past max_cpos.  If this returns successfully, all
3481 * clusters between cpos and cpos+write_len are safe to modify.
3482 */
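/*
 * A minimal caller sketch (illustrative only; a real write path also
 * holds the proper cluster locks and the byte-to-cluster rounding
 * shown here is simplified):
 *
 *	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 *	u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb,
 *						pos + count) - cpos;
 *
 *	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 *
 * On success, every cluster in [cpos, cpos + clusters) is safe to
 * modify without touching shared data.
 */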
3483int ocfs2_refcount_cow(struct inode *inode,
3484		       struct buffer_head *di_bh,
3485		       u32 cpos, u32 write_len, u32 max_cpos)
3486{
3487	int ret = 0;
3488	u32 p_cluster, num_clusters;
3489	unsigned int ext_flags;
3490
3491	while (write_len) {
3492		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3493					 &num_clusters, &ext_flags);
3494		if (ret) {
3495			mlog_errno(ret);
3496			break;
3497		}
3498
3499		if (write_len < num_clusters)
3500			num_clusters = write_len;
3501
3502		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3503			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3504						      num_clusters, max_cpos);
3505			if (ret) {
3506				mlog_errno(ret);
3507				break;
3508			}
3509		}
3510
3511		write_len -= num_clusters;
3512		cpos += num_clusters;
3513	}
3514
3515	return ret;
3516}
3517
3518static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
3519					  u32 v_cluster, u32 *p_cluster,
3520					  u32 *num_clusters,
3521					  unsigned int *extent_flags)
3522{
3523	struct inode *inode = context->inode;
3524	struct ocfs2_xattr_value_root *xv = context->cow_object;
3525
3526	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
3527					num_clusters, &xv->xr_list,
3528					extent_flags);
3529}
3530
3531/*
3532 * Given an xattr value root, calculate the maximum metadata and credits
3533 * we need for the refcount tree change if we truncate it to 0.
3534 */
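/*
 * A minimal caller sketch (illustrative only; the xattr removal path
 * is elided and the local names are hypothetical):
 *
 *	int meta_add = 0, credits = 0;
 *	struct ocfs2_alloc_context *meta_ac = NULL;
 *
 *	ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
 *						 ref_root_bh, xv,
 *						 &meta_add, &credits);
 *	if (!ret && meta_add)
 *		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
 *							meta_add, &meta_ac);
 *
 * The value truncate can then run under a transaction sized with
 * "credits".
 */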
3535int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
3536				       struct ocfs2_caching_info *ref_ci,
3537				       struct buffer_head *ref_root_bh,
3538				       struct ocfs2_xattr_value_root *xv,
3539				       int *meta_add, int *credits)
3540{
3541	int ret = 0, index, ref_blocks = 0;
3542	u32 p_cluster, num_clusters;
3543	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
3544	struct ocfs2_refcount_block *rb;
3545	struct ocfs2_refcount_rec rec;
3546	struct buffer_head *ref_leaf_bh = NULL;
3547
3548	while (cpos < clusters) {
3549		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
3550					       &num_clusters, &xv->xr_list,
3551					       NULL);
3552		if (ret) {
3553			mlog_errno(ret);
3554			goto out;
3555		}
3556
3557		cpos += num_clusters;
3558
3559		while (num_clusters) {
3560			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
3561						     p_cluster, num_clusters,
3562						     &rec, &index,
3563						     &ref_leaf_bh);
3564			if (ret) {
3565				mlog_errno(ret);
3566				goto out;
3567			}
3568
3569			BUG_ON(!rec.r_refcount);
3570
3571			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
3572
3573			/*
3574			 * We really don't know whether the other clusters are
3575			 * in this refcount block or not, so just assume the
3576			 * worst case: all the clusters are in this block and
3577			 * each one will split a refcount rec, so in total we
3578			 * need clusters * 2 new refcount recs.
3579			 */
3580			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
3581			    le16_to_cpu(rb->rf_records.rl_count))
3582				ref_blocks++;
3583
3584			*credits += 1;
3585			brelse(ref_leaf_bh);
3586			ref_leaf_bh = NULL;
3587
3588			if (num_clusters <= le32_to_cpu(rec.r_clusters))
3589				break;
3590			else
3591				num_clusters -= le32_to_cpu(rec.r_clusters);
3592			p_cluster += num_clusters;
3593		}
3594	}
3595
3596	*meta_add += ref_blocks;
3597	if (!ref_blocks)
3598		goto out;
3599
3600	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
3601	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
3602		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
3603	else {
3604		struct ocfs2_extent_tree et;
3605
3606		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
3607		*credits += ocfs2_calc_extend_credits(inode->i_sb,
3608						      et.et_root_el);
3609	}
3610
3611out:
3612	brelse(ref_leaf_bh);
3613	return ret;
3614}
3615
3616/*
3617 * Do CoW for xattr.
3618 */
3619int ocfs2_refcount_cow_xattr(struct inode *inode,
3620			     struct ocfs2_dinode *di,
3621			     struct ocfs2_xattr_value_buf *vb,
3622			     struct ocfs2_refcount_tree *ref_tree,
3623			     struct buffer_head *ref_root_bh,
3624			     u32 cpos, u32 write_len,
3625			     struct ocfs2_post_refcount *post)
3626{
3627	int ret;
3628	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
3629	struct ocfs2_cow_context *context = NULL;
3630	u32 cow_start, cow_len;
3631
3632	BUG_ON(!ocfs2_is_refcount_inode(inode));
3633
3634	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
3635					      cpos, write_len, UINT_MAX,
3636					      &cow_start, &cow_len);
3637	if (ret) {
3638		mlog_errno(ret);
3639		goto out;
3640	}
3641
3642	BUG_ON(cow_len == 0);
3643
3644	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3645	if (!context) {
3646		ret = -ENOMEM;
3647		mlog_errno(ret);
3648		goto out;
3649	}
3650
3651	context->inode = inode;
3652	context->cow_start = cow_start;
3653	context->cow_len = cow_len;
3654	context->ref_tree = ref_tree;
3655	context->ref_root_bh = ref_root_bh;
3656	context->cow_object = xv;
3657
3658	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
3659	/* We need the extra credits for duplicate_clusters by jbd. */
3660	context->extra_credits =
3661		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
3662	context->get_clusters = ocfs2_xattr_value_get_clusters;
3663	context->post_refcount = post;
3664
3665	ocfs2_init_xattr_value_extent_tree(&context->data_et,
3666					   INODE_CACHE(inode), vb);
3667
3668	ret = ocfs2_replace_cow(context);
3669	if (ret)
3670		mlog_errno(ret);
3671
3672out:
3673	kfree(context);
3674	return ret;
3675}
3676
3677/*
3678 * Insert a new extent into the refcount tree and mark an extent rec
3679 * as refcounted in the dinode tree.
3680 */
3681int ocfs2_add_refcount_flag(struct inode *inode,
3682			    struct ocfs2_extent_tree *data_et,
3683			    struct ocfs2_caching_info *ref_ci,
3684			    struct buffer_head *ref_root_bh,
3685			    u32 cpos, u32 p_cluster, u32 num_clusters,
3686			    struct ocfs2_cached_dealloc_ctxt *dealloc,
3687			    struct ocfs2_post_refcount *post)
3688{
3689	int ret;
3690	handle_t *handle;
3691	int credits = 1, ref_blocks = 0;
3692	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3693	struct ocfs2_alloc_context *meta_ac = NULL;
3694
3695	/* We need to be able to handle at least an extent tree split. */
3696	ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el);
3697
3698	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
3699					       ref_ci, ref_root_bh,
3700					       p_cluster, num_clusters,
3701					       &ref_blocks, &credits);
3702	if (ret) {
3703		mlog_errno(ret);
3704		goto out;
3705	}
3706
3707	trace_ocfs2_add_refcount_flag(ref_blocks, credits);
3708
3709	if (ref_blocks) {
3710		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
3711							ref_blocks, &meta_ac);
3712		if (ret) {
3713			mlog_errno(ret);
3714			goto out;
3715		}
3716	}
3717
3718	if (post)
3719		credits += post->credits;
3720
3721	handle = ocfs2_start_trans(osb, credits);
3722	if (IS_ERR(handle)) {
3723		ret = PTR_ERR(handle);
3724		mlog_errno(ret);
3725		goto out;
3726	}
3727
3728	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
3729					   cpos, num_clusters, p_cluster,
3730					   meta_ac, dealloc);
3731	if (ret) {
3732		mlog_errno(ret);
3733		goto out_commit;
3734	}
3735
3736	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3737					p_cluster, num_clusters, 0,
3738					meta_ac, dealloc);
3739	if (ret) {
3740		mlog_errno(ret);
3741		goto out_commit;
3742	}
3743
3744	if (post && post->func) {
3745		ret = post->func(inode, handle, post->para);
3746		if (ret)
3747			mlog_errno(ret);
3748	}
3749
3750out_commit:
3751	ocfs2_commit_trans(osb, handle);
3752out:
3753	if (meta_ac)
3754		ocfs2_free_alloc_context(meta_ac);
3755	return ret;
3756}
3757
3758static int ocfs2_change_ctime(struct inode *inode,
3759			      struct buffer_head *di_bh)
3760{
3761	int ret;
3762	handle_t *handle;
3763	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3764
3765	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
3766				   OCFS2_INODE_UPDATE_CREDITS);
3767	if (IS_ERR(handle)) {
3768		ret = PTR_ERR(handle);
3769		mlog_errno(ret);
3770		goto out;
3771	}
3772
3773	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
3774				      OCFS2_JOURNAL_ACCESS_WRITE);
3775	if (ret) {
3776		mlog_errno(ret);
3777		goto out_commit;
3778	}
3779
3780	inode->i_ctime = current_time(inode);
3781	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3782	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3783
3784	ocfs2_journal_dirty(handle, di_bh);
3785
3786out_commit:
3787	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
3788out:
3789	return ret;
3790}
3791
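/*
 * Attach a refcount tree to an inode: create the tree if the inode
 * does not have one yet, then walk every allocated data extent and
 * mark it refcounted.  Xattr storage is handled after the data walk,
 * and ctime is only bumped when a data extent actually changed.
 */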
3792static int ocfs2_attach_refcount_tree(struct inode *inode,
3793				      struct buffer_head *di_bh)
3794{
3795	int ret, data_changed = 0;
3796	struct buffer_head *ref_root_bh = NULL;
3797	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3798	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3799	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3800	struct ocfs2_refcount_tree *ref_tree;
3801	unsigned int ext_flags;
3802	loff_t size;
3803	u32 cpos, num_clusters, clusters, p_cluster;
3804	struct ocfs2_cached_dealloc_ctxt dealloc;
3805	struct ocfs2_extent_tree di_et;
3806
3807	ocfs2_init_dealloc_ctxt(&dealloc);
3808
3809	if (!ocfs2_is_refcount_inode(inode)) {
3810		ret = ocfs2_create_refcount_tree(inode, di_bh);
3811		if (ret) {
3812			mlog_errno(ret);
3813			goto out;
3814		}
3815	}
3816
3817	BUG_ON(!di->i_refcount_loc);
3818	ret = ocfs2_lock_refcount_tree(osb,
3819				       le64_to_cpu(di->i_refcount_loc), 1,
3820				       &ref_tree, &ref_root_bh);
3821	if (ret) {
3822		mlog_errno(ret);
3823		goto out;
3824	}
3825
3826	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3827		goto attach_xattr;
3828
3829	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3830
3831	size = i_size_read(inode);
3832	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
3833
3834	cpos = 0;
3835	while (cpos < clusters) {
3836		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3837					 &num_clusters, &ext_flags);
3838		if (ret) {
3839			mlog_errno(ret);
3840			goto unlock;
3841		}
3842		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
3843			ret = ocfs2_add_refcount_flag(inode, &di_et,
3844						      &ref_tree->rf_ci,
3845						      ref_root_bh, cpos,
3846						      p_cluster, num_clusters,
3847						      &dealloc, NULL);
3848			if (ret) {
3849				mlog_errno(ret);
3850				goto unlock;
3851			}
3852
3853			data_changed = 1;
3854		}
3855		cpos += num_clusters;
3856	}
3857
3858attach_xattr:
3859	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3860		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3861						       &ref_tree->rf_ci,
3862						       ref_root_bh,
3863						       &dealloc);
3864		if (ret) {
3865			mlog_errno(ret);
3866			goto unlock;
3867		}
3868	}
3869
3870	if (data_changed) {
3871		ret = ocfs2_change_ctime(inode, di_bh);
3872		if (ret)
3873			mlog_errno(ret);
3874	}
3875
3876unlock:
3877	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3878	brelse(ref_root_bh);
3879
3880	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
3881		ocfs2_schedule_truncate_log_flush(osb, 1);
3882		ocfs2_run_deallocs(osb, &dealloc);
3883	}
3884out:
3885	/*
3886	 * Empty the extent map so that we may get the right extent
3887	 * record from the disk.
3888	 */
3889	ocfs2_extent_map_trunc(inode, 0);
3890
3891	return ret;
3892}
3893
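/*
 * Insert an already-refcounted physical extent into the target
 * inode's extent tree at cpos, bump the refcount records for the
 * underlying clusters, and charge the new clusters to the target's
 * quota.
 */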
3894static int ocfs2_add_refcounted_extent(struct inode *inode,
3895				   struct ocfs2_extent_tree *et,
3896				   struct ocfs2_caching_info *ref_ci,
3897				   struct buffer_head *ref_root_bh,
3898				   u32 cpos, u32 p_cluster, u32 num_clusters,
3899				   unsigned int ext_flags,
3900				   struct ocfs2_cached_dealloc_ctxt *dealloc)
3901{
3902	int ret;
3903	handle_t *handle;
3904	int credits = 0;
3905	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3906	struct ocfs2_alloc_context *meta_ac = NULL;
3907
3908	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
3909					     p_cluster, num_clusters,
3910					     et, ref_ci,
3911					     ref_root_bh, &meta_ac,
3912					     NULL, &credits);
3913	if (ret) {
3914		mlog_errno(ret);
3915		goto out;
3916	}
3917
3918	handle = ocfs2_start_trans(osb, credits);
3919	if (IS_ERR(handle)) {
3920		ret = PTR_ERR(handle);
3921		mlog_errno(ret);
3922		goto out;
3923	}
3924
3925	ret = ocfs2_insert_extent(handle, et, cpos,
3926			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
3927			num_clusters, ext_flags, meta_ac);
3928	if (ret) {
3929		mlog_errno(ret);
3930		goto out_commit;
3931	}
3932
3933	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3934				      p_cluster, num_clusters,
3935				      meta_ac, dealloc);
3936	if (ret) {
3937		mlog_errno(ret);
3938		goto out_commit;
3939	}
3940
3941	ret = dquot_alloc_space_nodirty(inode,
3942		ocfs2_clusters_to_bytes(osb->sb, num_clusters));
3943	if (ret)
3944		mlog_errno(ret);
3945
3946out_commit:
3947	ocfs2_commit_trans(osb, handle);
3948out:
3949	if (meta_ac)
3950		ocfs2_free_alloc_context(meta_ac);
3951	return ret;
3952}
3953
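/*
 * Copy inline file data from the source dinode into the target dinode
 * and set OCFS2_INLINE_DATA_FL on the target.  No clusters are shared
 * here; inline data lives inside the inode block itself.
 */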
3954static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3955				       struct buffer_head *s_bh,
3956				       struct inode *t_inode,
3957				       struct buffer_head *t_bh)
3958{
3959	int ret;
3960	handle_t *handle;
3961	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3962	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3963	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3964
3965	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3966
3967	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3968	if (IS_ERR(handle)) {
3969		ret = PTR_ERR(handle);
3970		mlog_errno(ret);
3971		goto out;
3972	}
3973
3974	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3975				      OCFS2_JOURNAL_ACCESS_WRITE);
3976	if (ret) {
3977		mlog_errno(ret);
3978		goto out_commit;
3979	}
3980
3981	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3982	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3983	       le16_to_cpu(s_di->id2.i_data.id_count));
3984	spin_lock(&OCFS2_I(t_inode)->ip_lock);
3985	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3986	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3987	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3988
3989	ocfs2_journal_dirty(handle, t_bh);
3990
3991out_commit:
3992	ocfs2_commit_trans(osb, handle);
3993out:
3994	return ret;
3995}
3996
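/*
 * Walk the source inode's extents and insert each allocated extent
 * into the target inode as a refcounted mapping.  Holes (p_cluster ==
 * 0) are skipped.
 */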
3997static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3998				struct inode *t_inode,
3999				struct buffer_head *t_bh,
4000				struct ocfs2_caching_info *ref_ci,
4001				struct buffer_head *ref_root_bh,
4002				struct ocfs2_cached_dealloc_ctxt *dealloc)
4003{
4004	int ret = 0;
4005	u32 p_cluster, num_clusters, clusters, cpos;
4006	loff_t size;
4007	unsigned int ext_flags;
4008	struct ocfs2_extent_tree et;
4009
4010	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
4011
4012	size = i_size_read(s_inode);
4013	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
4014
4015	cpos = 0;
4016	while (cpos < clusters) {
4017		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
4018					 &num_clusters, &ext_flags);
4019		if (ret) {
4020			mlog_errno(ret);
4021			goto out;
4022		}
4023		if (p_cluster) {
4024			ret = ocfs2_add_refcounted_extent(t_inode, &et,
4025							  ref_ci, ref_root_bh,
4026							  cpos, p_cluster,
4027							  num_clusters,
4028							  ext_flags,
4029							  dealloc);
4030			if (ret) {
4031				mlog_errno(ret);
4032				goto out;
4033			}
4034		}
4035
4036		cpos += num_clusters;
4037	}
4038
4039out:
4040	return ret;
4041}
4042
4043/*
4044 * Change the new file's attributes to match the source.
4045 *
4046 * reflink creates a snapshot of a file, which means the attributes
4047 * must be identical except for three exceptions: nlink, ino and ctime.
4048 */
4049static int ocfs2_complete_reflink(struct inode *s_inode,
4050				  struct buffer_head *s_bh,
4051				  struct inode *t_inode,
4052				  struct buffer_head *t_bh,
4053				  bool preserve)
4054{
4055	int ret;
4056	handle_t *handle;
4057	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
4058	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
4059	loff_t size = i_size_read(s_inode);
4060
4061	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
4062				   OCFS2_INODE_UPDATE_CREDITS);
4063	if (IS_ERR(handle)) {
4064		ret = PTR_ERR(handle);
4065		mlog_errno(ret);
4066		return ret;
4067	}
4068
4069	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
4070				      OCFS2_JOURNAL_ACCESS_WRITE);
4071	if (ret) {
4072		mlog_errno(ret);
4073		goto out_commit;
4074	}
4075
4076	spin_lock(&OCFS2_I(t_inode)->ip_lock);
4077	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
4078	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
4079	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
4080	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
4081	i_size_write(t_inode, size);
4082	t_inode->i_blocks = s_inode->i_blocks;
4083
4084	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
4085	di->i_clusters = s_di->i_clusters;
4086	di->i_size = s_di->i_size;
4087	di->i_dyn_features = s_di->i_dyn_features;
4088	di->i_attr = s_di->i_attr;
4089
4090	if (preserve) {
4091		t_inode->i_uid = s_inode->i_uid;
4092		t_inode->i_gid = s_inode->i_gid;
4093		t_inode->i_mode = s_inode->i_mode;
4094		di->i_uid = s_di->i_uid;
4095		di->i_gid = s_di->i_gid;
4096		di->i_mode = s_di->i_mode;
4097
4098		/*
4099		 * Update the timestamps:
4100		 * we want mtime to appear identical to the source, while
4101		 * ctime gets a fresh value.
4102		 */
4103		t_inode->i_ctime = current_time(t_inode);
4104
4105		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
4106		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
4107
4108		t_inode->i_mtime = s_inode->i_mtime;
4109		di->i_mtime = s_di->i_mtime;
4110		di->i_mtime_nsec = s_di->i_mtime_nsec;
4111	}
4112
4113	ocfs2_journal_dirty(handle, t_bh);
4114
4115out_commit:
4116	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
4117	return ret;
4118}
4119
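/*
 * Make the target inode share the source's clusters: point it at the
 * source's refcount tree, then either copy inline data outright or
 * duplicate the whole extent list under the refcount tree lock.
 */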
4120static int ocfs2_create_reflink_node(struct inode *s_inode,
4121				     struct buffer_head *s_bh,
4122				     struct inode *t_inode,
4123				     struct buffer_head *t_bh,
4124				     bool preserve)
4125{
4126	int ret;
4127	struct buffer_head *ref_root_bh = NULL;
4128	struct ocfs2_cached_dealloc_ctxt dealloc;
4129	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
4130	struct ocfs2_refcount_block *rb;
4131	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
4132	struct ocfs2_refcount_tree *ref_tree;
4133
4134	ocfs2_init_dealloc_ctxt(&dealloc);
4135
4136	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4137				      le64_to_cpu(di->i_refcount_loc));
4138	if (ret) {
4139		mlog_errno(ret);
4140		goto out;
4141	}
4142
4143	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4144		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4145						  t_inode, t_bh);
4146		if (ret)
4147			mlog_errno(ret);
4148		goto out;
4149	}
4150
4151	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4152				       1, &ref_tree, &ref_root_bh);
4153	if (ret) {
4154		mlog_errno(ret);
4155		goto out;
4156	}
4157	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
4158
4159	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
4160					  &ref_tree->rf_ci, ref_root_bh,
4161					  &dealloc);
4162	if (ret) {
4163		mlog_errno(ret);
4164		goto out_unlock_refcount;
4165	}
4166
4167out_unlock_refcount:
4168	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4169	brelse(ref_root_bh);
4170out:
4171	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4172		ocfs2_schedule_truncate_log_flush(osb, 1);
4173		ocfs2_run_deallocs(osb, &dealloc);
4174	}
4175
4176	return ret;
4177}
4178
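/*
 * Core of the reflink operation.  The caller (ocfs2_reflink() below)
 * already holds the cluster locks on the source inode; this helper
 * flushes dirty pages, attaches a refcount tree to the source, and
 * then locks the target to build the shared-extent copy.
 */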
4179static int __ocfs2_reflink(struct dentry *old_dentry,
4180			   struct buffer_head *old_bh,
4181			   struct inode *new_inode,
4182			   bool preserve)
4183{
4184	int ret;
4185	struct inode *inode = d_inode(old_dentry);
4186	struct buffer_head *new_bh = NULL;
4187
4188	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
4189		ret = -EINVAL;
4190		mlog_errno(ret);
4191		goto out;
4192	}
4193
4194	ret = filemap_fdatawrite(inode->i_mapping);
4195	if (ret) {
4196		mlog_errno(ret);
4197		goto out;
4198	}
4199
4200	ret = ocfs2_attach_refcount_tree(inode, old_bh);
4201	if (ret) {
4202		mlog_errno(ret);
4203		goto out;
4204	}
4205
4206	inode_lock_nested(new_inode, I_MUTEX_CHILD);
4207	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4208				      OI_LS_REFLINK_TARGET);
4209	if (ret) {
4210		mlog_errno(ret);
4211		goto out_unlock;
4212	}
4213
4214	ret = ocfs2_create_reflink_node(inode, old_bh,
4215					new_inode, new_bh, preserve);
4216	if (ret) {
4217		mlog_errno(ret);
4218		goto inode_unlock;
4219	}
4220
4221	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
4222		ret = ocfs2_reflink_xattrs(inode, old_bh,
4223					   new_inode, new_bh,
4224					   preserve);
4225		if (ret) {
4226			mlog_errno(ret);
4227			goto inode_unlock;
4228		}
4229	}
4230
4231	ret = ocfs2_complete_reflink(inode, old_bh,
4232				     new_inode, new_bh, preserve);
4233	if (ret)
4234		mlog_errno(ret);
4235
4236inode_unlock:
4237	ocfs2_inode_unlock(new_inode, 1);
4238	brelse(new_bh);
4239out_unlock:
4240	inode_unlock(new_inode);
4241out:
4242	if (!ret) {
4243		ret = filemap_fdatawait(inode->i_mapping);
4244		if (ret)
4245			mlog_errno(ret);
4246	}
4247	return ret;
4248}
4249
4250static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4251			 struct dentry *new_dentry, bool preserve)
4252{
4253	int error;
4254	struct inode *inode = d_inode(old_dentry);
4255	struct buffer_head *old_bh = NULL;
4256	struct inode *new_orphan_inode = NULL;
4257
4258	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4259		return -EOPNOTSUPP;
4260
4261
4262	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
4263					     &new_orphan_inode);
4264	if (error) {
4265		mlog_errno(error);
4266		goto out;
4267	}
4268
4269	error = ocfs2_rw_lock(inode, 1);
4270	if (error) {
4271		mlog_errno(error);
4272		goto out;
4273	}
4274
4275	error = ocfs2_inode_lock(inode, &old_bh, 1);
4276	if (error) {
4277		mlog_errno(error);
4278		ocfs2_rw_unlock(inode, 1);
4279		goto out;
4280	}
4281
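	/*
	 * Locking order note: the rw and inode cluster locks are already
	 * held; ip_xattr_sem is taken before ip_alloc_sem, the same order
	 * used elsewhere in this file (e.g. ocfs2_try_remove_refcount_tree()).
	 */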
4282	down_write(&OCFS2_I(inode)->ip_xattr_sem);
4283	down_write(&OCFS2_I(inode)->ip_alloc_sem);
4284	error = __ocfs2_reflink(old_dentry, old_bh,
4285				new_orphan_inode, preserve);
4286	up_write(&OCFS2_I(inode)->ip_alloc_sem);
4287	up_write(&OCFS2_I(inode)->ip_xattr_sem);
4288
4289	ocfs2_inode_unlock(inode, 1);
4290	ocfs2_rw_unlock(inode, 1);
4291	brelse(old_bh);
4292
4293	if (error) {
4294		mlog_errno(error);
4295		goto out;
4296	}
4297
4298	/* If the security attributes aren't preserved, we need to re-initialize them. */
4299	if (!preserve) {
4300		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
4301						    &new_dentry->d_name);
4302		if (error)
4303			mlog_errno(error);
4304	}
4305out:
4306	if (!error) {
4307		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
4308						       new_dentry);
4309		if (error)
4310			mlog_errno(error);
4311	}
4312
4313	if (new_orphan_inode) {
4314		/*
4315		 * We need to open_unlock the inode no matter whether we
4316		 * succeed or not, so that other nodes can delete it later.
4317		 */
4318		ocfs2_open_unlock(new_orphan_inode);
4319		if (error)
4320			iput(new_orphan_inode);
4321	}
4322
4323	return error;
4324}
4325
4326/*
4327 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
4328 * sys_reflink().  This will go away when vfs_reflink() exists in
4329 * fs/namei.c.
4330 */
4331
4332/* Copied from may_create() in the VFS. */
4333static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
4334{
4335	if (d_really_is_positive(child))
4336		return -EEXIST;
4337	if (IS_DEADDIR(dir))
4338		return -ENOENT;
4339	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
4340}
4341
4342/**
4343 * ocfs2_vfs_reflink - Create a reference-counted link
4344 *
4345 * @old_dentry: source dentry + inode
4346 * @dir:        directory in which to create the target
4347 * @new_dentry: target dentry
4348 * @preserve:   if true, preserve all file attributes
4349 */
4350static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
4351			     struct dentry *new_dentry, bool preserve)
4352{
4353	struct inode *inode = d_inode(old_dentry);
4354	int error;
4355
4356	if (!inode)
4357		return -ENOENT;
4358
4359	error = ocfs2_may_create(dir, new_dentry);
4360	if (error)
4361		return error;
4362
4363	if (dir->i_sb != inode->i_sb)
4364		return -EXDEV;
4365
4366	/*
4367	 * A reflink to an append-only or immutable file cannot be created.
4368	 */
4369	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4370		return -EPERM;
4371
4372	/* Only regular files can be reflinked. */
4373	if (!S_ISREG(inode->i_mode))
4374		return -EPERM;
4375
4376	/*
4377	 * If the caller wants to preserve ownership, they must have the
4378	 * rights to do so.
4379	 */
4380	if (preserve) {
4381		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
4382			return -EPERM;
4383		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
4384			return -EPERM;
4385	}
4386
4387	/*
4388	 * If the caller is modifying any aspect of the attributes, they
4389	 * are not creating a snapshot.  They need read permission on the
4390	 * file.
4391	 */
4392	if (!preserve) {
4393		error = inode_permission(inode, MAY_READ);
4394		if (error)
4395			return error;
4396	}
4397
4398	inode_lock(inode);
4399	error = dquot_initialize(dir);
4400	if (!error)
4401		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
4402	inode_unlock(inode);
4403	if (!error)
4404		fsnotify_create(dir, new_dentry);
4405	return error;
4406}
4407/*
4408 * Most of this code is copied from sys_linkat.
4409 */
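/*
 * A minimal sketch of the userspace side, assuming the argument layout
 * of struct reflink_arguments from ocfs2_ioctl.h (two __u64 pointers to
 * NUL-terminated path strings plus a preserve flag):
 *
 *	struct reflink_arguments args = {
 *		.old_path = (__u64)(unsigned long)"/mnt/ocfs2/src",
 *		.new_path = (__u64)(unsigned long)"/mnt/ocfs2/dst",
 *		.preserve = 1,
 *	};
 *	ioctl(fd, OCFS2_IOC_REFLINK, &args);
 */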
4410int ocfs2_reflink_ioctl(struct inode *inode,
4411			const char __user *oldname,
4412			const char __user *newname,
4413			bool preserve)
4414{
4415	struct dentry *new_dentry;
4416	struct path old_path, new_path;
4417	int error;
4418
4419	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4420		return -EOPNOTSUPP;
4421
4422	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
4423	if (error) {
4424		mlog_errno(error);
4425		return error;
4426	}
4427
4428	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
4429	error = PTR_ERR(new_dentry);
4430	if (IS_ERR(new_dentry)) {
4431		mlog_errno(error);
4432		goto out;
4433	}
4434
4435	error = -EXDEV;
4436	if (old_path.mnt != new_path.mnt) {
4437		mlog_errno(error);
4438		goto out_dput;
4439	}
4440
4441	error = ocfs2_vfs_reflink(old_path.dentry,
4442				  d_inode(new_path.dentry),
4443				  new_dentry, preserve);
4444out_dput:
4445	done_path_create(&new_path, new_dentry);
4446out:
4447	path_put(&old_path);
4448
4449	return error;
4450}
4451
4452/* Update destination inode size, if necessary. */
4453static int ocfs2_reflink_update_dest(struct inode *dest,
4454				     struct buffer_head *d_bh,
4455				     loff_t newlen)
4456{
4457	handle_t *handle;
4458	int ret;
4459
4460	dest->i_blocks = ocfs2_inode_sector_count(dest);
4461
4462	if (newlen <= i_size_read(dest))
4463		return 0;
4464
4465	handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
4466				   OCFS2_INODE_UPDATE_CREDITS);
4467	if (IS_ERR(handle)) {
4468		ret = PTR_ERR(handle);
4469		mlog_errno(ret);
4470		return ret;
4471	}
4472
4473	/* Extend i_size if needed. */
4474	spin_lock(&OCFS2_I(dest)->ip_lock);
4475	if (newlen > i_size_read(dest))
4476		i_size_write(dest, newlen);
4477	spin_unlock(&OCFS2_I(dest)->ip_lock);
4478	dest->i_ctime = dest->i_mtime = current_time(dest);
4479
4480	ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
4481	if (ret) {
4482		mlog_errno(ret);
4483		goto out_commit;
4484	}
4485
4486out_commit:
4487	ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
4488	return ret;
4489}
4490
4491/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
4492static int ocfs2_reflink_remap_extent(struct inode *s_inode,
4493				      struct buffer_head *s_bh,
4494				      loff_t pos_in,
4495				      struct inode *t_inode,
4496				      struct buffer_head *t_bh,
4497				      loff_t pos_out,
4498				      loff_t len,
4499				      struct ocfs2_cached_dealloc_ctxt *dealloc)
4500{
4501	struct ocfs2_extent_tree s_et;
4502	struct ocfs2_extent_tree t_et;
4503	struct ocfs2_dinode *dis;
4504	struct buffer_head *ref_root_bh = NULL;
4505	struct ocfs2_refcount_tree *ref_tree;
4506	struct ocfs2_super *osb;
4507	loff_t pstart, plen;
4508	u32 p_cluster, num_clusters, slast, spos, tpos;
4509	unsigned int ext_flags;
4510	int ret = 0;
4511
4512	osb = OCFS2_SB(s_inode->i_sb);
4513	dis = (struct ocfs2_dinode *)s_bh->b_data;
4514	ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
4515	ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);
4516
4517	spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
4518	tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
4519	slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);
4520
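	/*
	 * spos/tpos are the current source/target offsets in clusters;
	 * slast is the first cluster past the end of the source range.
	 * Each pass below shares at most one source extent with the target.
	 */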
4521	while (spos < slast) {
4522		if (fatal_signal_pending(current)) {
4523			ret = -EINTR;
4524			goto out;
4525		}
4526
4527		/* Look up the extent. */
4528		ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
4529					 &num_clusters, &ext_flags);
4530		if (ret) {
4531			mlog_errno(ret);
4532			goto out;
4533		}
4534
4535		num_clusters = min_t(u32, num_clusters, slast - spos);
4536
4537		/* Punch out the dest range. */
4538		pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
4539		plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
4540		ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
4541		if (ret) {
4542			mlog_errno(ret);
4543			goto out;
4544		}
4545
4546		if (p_cluster == 0)
4547			goto next_loop;
4548
4549		/* Lock the refcount btree... */
4550		ret = ocfs2_lock_refcount_tree(osb,
4551					       le64_to_cpu(dis->i_refcount_loc),
4552					       1, &ref_tree, &ref_root_bh);
4553		if (ret) {
4554			mlog_errno(ret);
4555			goto out;
4556		}
4557
4558		/* Mark s_inode's extent as refcounted. */
4559		if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
4560			ret = ocfs2_add_refcount_flag(s_inode, &s_et,
4561						      &ref_tree->rf_ci,
4562						      ref_root_bh, spos,
4563						      p_cluster, num_clusters,
4564						      dealloc, NULL);
4565			if (ret) {
4566				mlog_errno(ret);
4567				goto out_unlock_refcount;
4568			}
4569		}
4570
4571		/* Map in the new extent. */
4572		ext_flags |= OCFS2_EXT_REFCOUNTED;
4573		ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
4574						  &ref_tree->rf_ci,
4575						  ref_root_bh,
4576						  tpos, p_cluster,
4577						  num_clusters,
4578						  ext_flags,
4579						  dealloc);
4580		if (ret) {
4581			mlog_errno(ret);
4582			goto out_unlock_refcount;
4583		}
4584
4585		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4586		brelse(ref_root_bh);
4587next_loop:
4588		spos += num_clusters;
4589		tpos += num_clusters;
4590	}
4591
4592out:
4593	return ret;
4594out_unlock_refcount:
4595	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4596	brelse(ref_root_bh);
4597	return ret;
4598}
4599
4600/* Set up refcount tree and remap s_inode to t_inode. */
4601static int ocfs2_reflink_remap_blocks(struct inode *s_inode,
4602				      struct buffer_head *s_bh,
4603				      loff_t pos_in,
4604				      struct inode *t_inode,
4605				      struct buffer_head *t_bh,
4606				      loff_t pos_out,
4607				      loff_t len)
4608{
4609	struct ocfs2_cached_dealloc_ctxt dealloc;
4610	struct ocfs2_super *osb;
4611	struct ocfs2_dinode *dis;
4612	struct ocfs2_dinode *dit;
4613	int ret;
4614
4615	osb = OCFS2_SB(s_inode->i_sb);
4616	dis = (struct ocfs2_dinode *)s_bh->b_data;
4617	dit = (struct ocfs2_dinode *)t_bh->b_data;
4618	ocfs2_init_dealloc_ctxt(&dealloc);
4619
4620	/*
4621	 * If we're reflinking the entire file and the source is inline
4622	 * data, just copy the contents.
4623	 */
4624	if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
4625	    i_size_read(t_inode) <= len &&
4626	    (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
4627		ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
4628		if (ret)
4629			mlog_errno(ret);
4630		goto out;
4631	}
4632
4633	/*
4634	 * If the two inodes belong to different refcount groups then
4635	 * forget it, because we don't know how (and don't want) to merge
4636	 * refcount trees.
4637	 */
4638	ret = -EOPNOTSUPP;
4639	if (ocfs2_is_refcount_inode(s_inode) &&
4640	    ocfs2_is_refcount_inode(t_inode) &&
4641	    le64_to_cpu(dis->i_refcount_loc) !=
4642	    le64_to_cpu(dit->i_refcount_loc))
4643		goto out;
4644
4645	/* Neither inode has a refcount tree.  Add one to s_inode. */
4646	if (!ocfs2_is_refcount_inode(s_inode) &&
4647	    !ocfs2_is_refcount_inode(t_inode)) {
4648		ret = ocfs2_create_refcount_tree(s_inode, s_bh);
4649		if (ret) {
4650			mlog_errno(ret);
4651			goto out;
4652		}
4653	}
4654
4655	/* Ensure that both inodes end up with the same refcount tree. */
4656	if (!ocfs2_is_refcount_inode(s_inode)) {
4657		ret = ocfs2_set_refcount_tree(s_inode, s_bh,
4658					      le64_to_cpu(dit->i_refcount_loc));
4659		if (ret) {
4660			mlog_errno(ret);
4661			goto out;
4662		}
4663	}
4664	if (!ocfs2_is_refcount_inode(t_inode)) {
4665		ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4666					      le64_to_cpu(dis->i_refcount_loc));
4667		if (ret) {
4668			mlog_errno(ret);
4669			goto out;
4670		}
4671	}
4672
4673	/* Turn off inline data in the dest file. */
4674	if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4675		ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
4676		if (ret) {
4677			mlog_errno(ret);
4678			goto out;
4679		}
4680	}
4681
4682	/* Actually remap extents now. */
4683	ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
4684					 pos_out, len, &dealloc);
4685	if (ret) {
4686		mlog_errno(ret);
4687		goto out;
4688	}
4689
4690out:
4691	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4692		ocfs2_schedule_truncate_log_flush(osb, 1);
4693		ocfs2_run_deallocs(osb, &dealloc);
4694	}
4695
4696	return ret;
4697}
4698
4699/* Lock an inode and grab a bh pointing to the inode. */
4700static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4701				     struct buffer_head **bh1,
4702				     struct inode *t_inode,
4703				     struct buffer_head **bh2)
4704{
4705	struct inode *inode1;
4706	struct inode *inode2;
4707	struct ocfs2_inode_info *oi1;
4708	struct ocfs2_inode_info *oi2;
4709	bool same_inode = (s_inode == t_inode);
4710	int status;
4711
4712	/* First grab the VFS and rw locks. */
4713	lock_two_nondirectories(s_inode, t_inode);
4714	inode1 = s_inode;
4715	inode2 = t_inode;
4716	if (inode1->i_ino > inode2->i_ino)
4717		swap(inode1, inode2);
4718
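	/*
	 * Take the rw locks in ascending inode-number order so that two
	 * concurrent reflinks between the same pair of inodes cannot
	 * deadlock against each other.
	 */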
4719	status = ocfs2_rw_lock(inode1, 1);
4720	if (status) {
4721		mlog_errno(status);
4722		goto out_i1;
4723	}
4724	if (!same_inode) {
4725		status = ocfs2_rw_lock(inode2, 1);
4726		if (status) {
4727			mlog_errno(status);
4728			goto out_i2;
4729		}
4730	}
4731
4732	/* Now go for the cluster locks */
4733	oi1 = OCFS2_I(inode1);
4734	oi2 = OCFS2_I(inode2);
4735
4736	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
4737				(unsigned long long)oi2->ip_blkno);
4738
4739	if (*bh1)
4740		*bh1 = NULL;
4741	if (*bh2)
4742		*bh2 = NULL;
4743
4744	/* We always want to lock the one with the lower lockid first. */
4745	if (oi1->ip_blkno > oi2->ip_blkno)
4746		mlog_errno(-ENOLCK);
4747
4748	/* lock id1 */
4749	status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
4750	if (status < 0) {
4751		if (status != -ENOENT)
4752			mlog_errno(status);
4753		goto out_rw2;
4754	}
4755
4756	/* lock id2 */
4757	if (!same_inode) {
4758		status = ocfs2_inode_lock_nested(inode2, bh2, 1,
4759						 OI_LS_REFLINK_TARGET);
4760		if (status < 0) {
4761			if (status != -ENOENT)
4762				mlog_errno(status);
4763			goto out_cl1;
4764		}
4765	} else
4766		*bh2 = *bh1;
4767
4768	trace_ocfs2_double_lock_end(
4769			(unsigned long long)OCFS2_I(inode1)->ip_blkno,
4770			(unsigned long long)OCFS2_I(inode2)->ip_blkno);
4771
4772	return 0;
4773
4774out_cl1:
4775	ocfs2_inode_unlock(inode1, 1);
4776	brelse(*bh1);
4777	*bh1 = NULL;
4778out_rw2:
4779	ocfs2_rw_unlock(inode2, 1);
4780out_i2:
4781	ocfs2_rw_unlock(inode1, 1);
4782out_i1:
4783	unlock_two_nondirectories(s_inode, t_inode);
4784	return status;
4785}
4786
4787/* Unlock both inodes and release buffers. */
4788static void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
4789					struct buffer_head *s_bh,
4790					struct inode *t_inode,
4791					struct buffer_head *t_bh)
4792{
4793	ocfs2_inode_unlock(s_inode, 1);
4794	ocfs2_rw_unlock(s_inode, 1);
4795	brelse(s_bh);
4796	if (s_inode != t_inode) {
4797		ocfs2_inode_unlock(t_inode, 1);
4798		ocfs2_rw_unlock(t_inode, 1);
4799		brelse(t_bh);
4800	}
4801	unlock_two_nondirectories(s_inode, t_inode);
4802}
4803
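/*
 * A hedged sketch of how this entry point is wired into the VFS clone
 * hooks (names assumed to match fs/ocfs2/file.c in this kernel):
 *
 *	static int ocfs2_file_clone_range(struct file *file_in, loff_t pos_in,
 *					  struct file *file_out, loff_t pos_out,
 *					  u64 len)
 *	{
 *		return ocfs2_reflink_remap_range(file_in, pos_in, file_out,
 *						 pos_out, len, false);
 *	}
 */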
4804/* Link a range of blocks from one file to another. */
4805int ocfs2_reflink_remap_range(struct file *file_in,
4806			      loff_t pos_in,
4807			      struct file *file_out,
4808			      loff_t pos_out,
4809			      u64 len,
4810			      bool is_dedupe)
4811{
4812	struct inode *inode_in = file_inode(file_in);
4813	struct inode *inode_out = file_inode(file_out);
4814	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
4815	struct buffer_head *in_bh = NULL, *out_bh = NULL;
4816	bool same_inode = (inode_in == inode_out);
4817	ssize_t ret;
4818
4819	if (!ocfs2_refcount_tree(osb))
4820		return -EOPNOTSUPP;
4821	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
4822		return -EROFS;
4823
4824	/* Lock both files against IO */
4825	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
4826	if (ret)
4827		return ret;
4828
4829	/* Check file eligibility and prepare for block sharing. */
4830	ret = -EINVAL;
4831	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
4832	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
4833		goto out_unlock;
4834
4835	ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
4836			&len, is_dedupe);
4837	if (ret <= 0)
4838		goto out_unlock;
4839
4840	/* Lock out changes to the allocation maps and remap. */
4841	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
4842	if (!same_inode)
4843		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
4844				  SINGLE_DEPTH_NESTING);
4845
4846	ret = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, inode_out,
4847					 out_bh, pos_out, len);
4848
4849	/* Zap any page cache for the destination file's range. */
4850	if (!ret)
4851		truncate_inode_pages_range(&inode_out->i_data, pos_out,
4852					   PAGE_ALIGN(pos_out + len) - 1);
4853
4854	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
4855	if (!same_inode)
4856		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
4857	if (ret) {
4858		mlog_errno(ret);
4859		goto out_unlock;
4860	}
4861
4862	/*
4863	 * Empty the extent map so that we may get the right extent
4864	 * record from the disk.
4865	 */
4866	ocfs2_extent_map_trunc(inode_in, 0);
4867	ocfs2_extent_map_trunc(inode_out, 0);
4868
4869	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
4870	if (ret) {
4871		mlog_errno(ret);
4872		goto out_unlock;
4873	}
4874
4875	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
4876	return 0;
4877
4878out_unlock:
4879	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
4880	return ret;
4881}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
 
   3 * refcounttree.c
   4 *
   5 * Copyright (C) 2009 Oracle.  All rights reserved.
 
 
 
 
 
 
 
 
 
   6 */
   7
   8#include <linux/sort.h>
   9#include <cluster/masklog.h>
  10#include "ocfs2.h"
  11#include "inode.h"
  12#include "alloc.h"
  13#include "suballoc.h"
  14#include "journal.h"
  15#include "uptodate.h"
  16#include "super.h"
  17#include "buffer_head_io.h"
  18#include "blockcheck.h"
  19#include "refcounttree.h"
  20#include "sysfile.h"
  21#include "dlmglue.h"
  22#include "extent_map.h"
  23#include "aops.h"
  24#include "xattr.h"
  25#include "namei.h"
  26#include "ocfs2_trace.h"
  27#include "file.h"
  28
  29#include <linux/bio.h>
  30#include <linux/blkdev.h>
  31#include <linux/slab.h>
  32#include <linux/writeback.h>
  33#include <linux/pagevec.h>
  34#include <linux/swap.h>
  35#include <linux/security.h>
  36#include <linux/fsnotify.h>
  37#include <linux/quotaops.h>
  38#include <linux/namei.h>
  39#include <linux/mount.h>
  40#include <linux/posix_acl.h>
  41
  42struct ocfs2_cow_context {
  43	struct inode *inode;
  44	u32 cow_start;
  45	u32 cow_len;
  46	struct ocfs2_extent_tree data_et;
  47	struct ocfs2_refcount_tree *ref_tree;
  48	struct buffer_head *ref_root_bh;
  49	struct ocfs2_alloc_context *meta_ac;
  50	struct ocfs2_alloc_context *data_ac;
  51	struct ocfs2_cached_dealloc_ctxt dealloc;
  52	void *cow_object;
  53	struct ocfs2_post_refcount *post_refcount;
  54	int extra_credits;
  55	int (*get_clusters)(struct ocfs2_cow_context *context,
  56			    u32 v_cluster, u32 *p_cluster,
  57			    u32 *num_clusters,
  58			    unsigned int *extent_flags);
  59	int (*cow_duplicate_clusters)(handle_t *handle,
  60				      struct inode *inode,
  61				      u32 cpos, u32 old_cluster,
  62				      u32 new_cluster, u32 new_len);
  63};
  64
  65static inline struct ocfs2_refcount_tree *
  66cache_info_to_refcount(struct ocfs2_caching_info *ci)
  67{
  68	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  69}
  70
  71static int ocfs2_validate_refcount_block(struct super_block *sb,
  72					 struct buffer_head *bh)
  73{
  74	int rc;
  75	struct ocfs2_refcount_block *rb =
  76		(struct ocfs2_refcount_block *)bh->b_data;
  77
  78	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
  79
  80	BUG_ON(!buffer_uptodate(bh));
  81
  82	/*
  83	 * If the ecc fails, we return the error but otherwise
  84	 * leave the filesystem running.  We know any error is
  85	 * local to this block.
  86	 */
  87	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  88	if (rc) {
  89		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  90		     (unsigned long long)bh->b_blocknr);
  91		return rc;
  92	}
  93
  94
  95	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
  96		rc = ocfs2_error(sb,
  97				 "Refcount block #%llu has bad signature %.*s\n",
  98				 (unsigned long long)bh->b_blocknr, 7,
  99				 rb->rf_signature);
 100		goto out;
 101	}
 102
 103	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
 104		rc = ocfs2_error(sb,
 105				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
 106				 (unsigned long long)bh->b_blocknr,
 107				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
 108		goto out;
 109	}
 110
 111	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
 112		rc = ocfs2_error(sb,
 113				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
 114				 (unsigned long long)bh->b_blocknr,
 115				 le32_to_cpu(rb->rf_fs_generation));
 116		goto out;
 117	}
 118out:
 119	return rc;
 120}
 121
 122static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
 123				     u64 rb_blkno,
 124				     struct buffer_head **bh)
 125{
 126	int rc;
 127	struct buffer_head *tmp = *bh;
 128
 129	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
 130			      ocfs2_validate_refcount_block);
 131
 132	/* If ocfs2_read_block() got us a new bh, pass it up. */
 133	if (!rc && !*bh)
 134		*bh = tmp;
 135
 136	return rc;
 137}
 138
 139static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
 140{
 141	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 142
 143	return rf->rf_blkno;
 144}
 145
 146static struct super_block *
 147ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
 148{
 149	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 150
 151	return rf->rf_sb;
 152}
 153
 154static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
 155__acquires(&rf->rf_lock)
 156{
 157	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 158
 159	spin_lock(&rf->rf_lock);
 160}
 161
 162static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
 163__releases(&rf->rf_lock)
 164{
 165	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 166
 167	spin_unlock(&rf->rf_lock);
 168}
 169
 170static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
 171{
 172	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 173
 174	mutex_lock(&rf->rf_io_mutex);
 175}
 176
 177static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
 178{
 179	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 180
 181	mutex_unlock(&rf->rf_io_mutex);
 182}
 183
 184static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
 185	.co_owner		= ocfs2_refcount_cache_owner,
 186	.co_get_super		= ocfs2_refcount_cache_get_super,
 187	.co_cache_lock		= ocfs2_refcount_cache_lock,
 188	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
 189	.co_io_lock		= ocfs2_refcount_cache_io_lock,
 190	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
 191};
 192
 193static struct ocfs2_refcount_tree *
 194ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
 195{
 196	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
 197	struct ocfs2_refcount_tree *tree = NULL;
 198
 199	while (n) {
 200		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
 201
 202		if (blkno < tree->rf_blkno)
 203			n = n->rb_left;
 204		else if (blkno > tree->rf_blkno)
 205			n = n->rb_right;
 206		else
 207			return tree;
 208	}
 209
 210	return NULL;
 211}
 212
 213/* osb_lock is already locked. */
 214static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
 215				       struct ocfs2_refcount_tree *new)
 216{
 217	u64 rf_blkno = new->rf_blkno;
 218	struct rb_node *parent = NULL;
 219	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
 220	struct ocfs2_refcount_tree *tmp;
 221
 222	while (*p) {
 223		parent = *p;
 224
 225		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
 226			       rf_node);
 227
 228		if (rf_blkno < tmp->rf_blkno)
 229			p = &(*p)->rb_left;
 230		else if (rf_blkno > tmp->rf_blkno)
 231			p = &(*p)->rb_right;
 232		else {
 233			/* This should never happen! */
 234			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
 235			     (unsigned long long)rf_blkno);
 236			BUG();
 237		}
 238	}
 239
 240	rb_link_node(&new->rf_node, parent, p);
 241	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
 242}
 243
 244static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
 245{
 246	ocfs2_metadata_cache_exit(&tree->rf_ci);
 247	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
 248	ocfs2_lock_res_free(&tree->rf_lockres);
 249	kfree(tree);
 250}
 251
 252static inline void
 253ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
 254					struct ocfs2_refcount_tree *tree)
 255{
 256	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
 257	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
 258		osb->osb_ref_tree_lru = NULL;
 259}
 260
 261static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
 262					struct ocfs2_refcount_tree *tree)
 263{
 264	spin_lock(&osb->osb_lock);
 265	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 266	spin_unlock(&osb->osb_lock);
 267}
 268
 269static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
 270{
 271	struct ocfs2_refcount_tree *tree =
 272		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
 273
 274	ocfs2_free_refcount_tree(tree);
 275}
 276
 277static inline void
 278ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
 279{
 280	kref_get(&tree->rf_getcnt);
 281}
 282
 283static inline void
 284ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
 285{
 286	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
 287}
 288
 289static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
 290					       struct super_block *sb)
 291{
 292	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
 293	mutex_init(&new->rf_io_mutex);
 294	new->rf_sb = sb;
 295	spin_lock_init(&new->rf_lock);
 296}
 297
 298static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
 299					struct ocfs2_refcount_tree *new,
 300					u64 rf_blkno, u32 generation)
 301{
 302	init_rwsem(&new->rf_sem);
 303	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
 304				     rf_blkno, generation);
 305}
 306
 307static struct ocfs2_refcount_tree*
 308ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
 309{
 310	struct ocfs2_refcount_tree *new;
 311
 312	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
 313	if (!new)
 314		return NULL;
 315
 316	new->rf_blkno = rf_blkno;
 317	kref_init(&new->rf_getcnt);
 318	ocfs2_init_refcount_tree_ci(new, osb->sb);
 319
 320	return new;
 321}
 322
 323static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
 324				   struct ocfs2_refcount_tree **ret_tree)
 325{
 326	int ret = 0;
 327	struct ocfs2_refcount_tree *tree, *new = NULL;
 328	struct buffer_head *ref_root_bh = NULL;
 329	struct ocfs2_refcount_block *ref_rb;
 330
 331	spin_lock(&osb->osb_lock);
 332	if (osb->osb_ref_tree_lru &&
 333	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
 334		tree = osb->osb_ref_tree_lru;
 335	else
 336		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 337	if (tree)
 338		goto out;
 339
 340	spin_unlock(&osb->osb_lock);
 341
 342	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
 343	if (!new) {
 344		ret = -ENOMEM;
 345		mlog_errno(ret);
 346		return ret;
 347	}
 348	/*
 349	 * We need the generation to create the refcount tree lock and since
 350	 * it isn't changed during the tree modification, we are safe here to
 351	 * read without protection.
 352	 * We also have to purge the cache after we create the lock since the
 353	 * refcount block may have the stale data. It can only be trusted when
 354	 * we hold the refcount lock.
 355	 */
 356	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
 357	if (ret) {
 358		mlog_errno(ret);
 359		ocfs2_metadata_cache_exit(&new->rf_ci);
 360		kfree(new);
 361		return ret;
 362	}
 363
 364	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 365	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
 366	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
 367				      new->rf_generation);
 368	ocfs2_metadata_cache_purge(&new->rf_ci);
 369
 370	spin_lock(&osb->osb_lock);
 371	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 372	if (tree)
 373		goto out;
 374
 375	ocfs2_insert_refcount_tree(osb, new);
 376
 377	tree = new;
 378	new = NULL;
 379
 380out:
 381	*ret_tree = tree;
 382
 383	osb->osb_ref_tree_lru = tree;
 384
 385	spin_unlock(&osb->osb_lock);
 386
 387	if (new)
 388		ocfs2_free_refcount_tree(new);
 389
 390	brelse(ref_root_bh);
 391	return ret;
 392}
 393
 394static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
 395{
 396	int ret;
 397	struct buffer_head *di_bh = NULL;
 398	struct ocfs2_dinode *di;
 399
 400	ret = ocfs2_read_inode_block(inode, &di_bh);
 401	if (ret) {
 402		mlog_errno(ret);
 403		goto out;
 404	}
 405
 406	BUG_ON(!ocfs2_is_refcount_inode(inode));
 407
 408	di = (struct ocfs2_dinode *)di_bh->b_data;
 409	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
 410	brelse(di_bh);
 411out:
 412	return ret;
 413}
 414
 415static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 416				      struct ocfs2_refcount_tree *tree, int rw)
 417{
 418	int ret;
 419
 420	ret = ocfs2_refcount_lock(tree, rw);
 421	if (ret) {
 422		mlog_errno(ret);
 423		goto out;
 424	}
 425
 426	if (rw)
 427		down_write(&tree->rf_sem);
 428	else
 429		down_read(&tree->rf_sem);
 430
 431out:
 432	return ret;
 433}
 434
 435/*
 436 * Lock the refcount tree pointed by ref_blkno and return the tree.
 437 * In most case, we lock the tree and read the refcount block.
 438 * So read it here if the caller really needs it.
 439 *
 440 * If the tree has been re-created by other node, it will free the
 441 * old one and re-create it.
 442 */
 443int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 444			     u64 ref_blkno, int rw,
 445			     struct ocfs2_refcount_tree **ret_tree,
 446			     struct buffer_head **ref_bh)
 447{
 448	int ret, delete_tree = 0;
 449	struct ocfs2_refcount_tree *tree = NULL;
 450	struct buffer_head *ref_root_bh = NULL;
 451	struct ocfs2_refcount_block *rb;
 452
 453again:
 454	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
 455	if (ret) {
 456		mlog_errno(ret);
 457		return ret;
 458	}
 459
 460	ocfs2_refcount_tree_get(tree);
 461
 462	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
 463	if (ret) {
 464		mlog_errno(ret);
 465		ocfs2_refcount_tree_put(tree);
 466		goto out;
 467	}
 468
 469	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
 470					&ref_root_bh);
 471	if (ret) {
 472		mlog_errno(ret);
 473		ocfs2_unlock_refcount_tree(osb, tree, rw);
 474		goto out;
 475	}
 476
 477	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 478	/*
 479	 * If the refcount block has been freed and re-created, we may need
 480	 * to recreate the refcount tree also.
 481	 *
 482	 * Here we just remove the tree from the rb-tree, and the last
 483	 * kref holder will unlock and delete this refcount_tree.
 484	 * Then we goto "again" and ocfs2_get_refcount_tree will create
 485	 * the new refcount tree for us.
 486	 */
 487	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
 488		if (!tree->rf_removed) {
 489			ocfs2_erase_refcount_tree_from_list(osb, tree);
 490			tree->rf_removed = 1;
 491			delete_tree = 1;
 492		}
 493
 494		ocfs2_unlock_refcount_tree(osb, tree, rw);
 495		/*
 496		 * We get an extra reference when we create the refcount
 497		 * tree, so another put will destroy it.
 498		 */
 499		if (delete_tree)
 500			ocfs2_refcount_tree_put(tree);
 501		brelse(ref_root_bh);
 502		ref_root_bh = NULL;
 503		goto again;
 504	}
 505
 506	*ret_tree = tree;
 507	if (ref_bh) {
 508		*ref_bh = ref_root_bh;
 509		ref_root_bh = NULL;
 510	}
 511out:
 512	brelse(ref_root_bh);
 513	return ret;
 514}
 515
 516void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
 517				struct ocfs2_refcount_tree *tree, int rw)
 518{
 519	if (rw)
 520		up_write(&tree->rf_sem);
 521	else
 522		up_read(&tree->rf_sem);
 523
 524	ocfs2_refcount_unlock(tree, rw);
 525	ocfs2_refcount_tree_put(tree);
 526}
 527
 528void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
 529{
 530	struct rb_node *node;
 531	struct ocfs2_refcount_tree *tree;
 532	struct rb_root *root = &osb->osb_rf_lock_tree;
 533
 534	while ((node = rb_last(root)) != NULL) {
 535		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
 536
 537		trace_ocfs2_purge_refcount_trees(
 538				(unsigned long long) tree->rf_blkno);
 539
 540		rb_erase(&tree->rf_node, root);
 541		ocfs2_free_refcount_tree(tree);
 542	}
 543}
 544
 545/*
 546 * Create a refcount tree for an inode.
 547 * We take for granted that the inode is already locked.
 548 */
 549static int ocfs2_create_refcount_tree(struct inode *inode,
 550				      struct buffer_head *di_bh)
 551{
 552	int ret;
 553	handle_t *handle = NULL;
 554	struct ocfs2_alloc_context *meta_ac = NULL;
 555	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 556	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 557	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 558	struct buffer_head *new_bh = NULL;
 559	struct ocfs2_refcount_block *rb;
 560	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
 561	u16 suballoc_bit_start;
 562	u32 num_got;
 563	u64 suballoc_loc, first_blkno;
 564
 565	BUG_ON(ocfs2_is_refcount_inode(inode));
 566
 567	trace_ocfs2_create_refcount_tree(
 568		(unsigned long long)oi->ip_blkno);
 569
 570	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
 571	if (ret) {
 572		mlog_errno(ret);
 573		goto out;
 574	}
 575
 576	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
 577	if (IS_ERR(handle)) {
 578		ret = PTR_ERR(handle);
 579		mlog_errno(ret);
 580		goto out;
 581	}
 582
 583	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 584				      OCFS2_JOURNAL_ACCESS_WRITE);
 585	if (ret) {
 586		mlog_errno(ret);
 587		goto out_commit;
 588	}
 589
 590	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
 591				   &suballoc_bit_start, &num_got,
 592				   &first_blkno);
 593	if (ret) {
 594		mlog_errno(ret);
 595		goto out_commit;
 596	}
 597
 598	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
 599	if (!new_tree) {
 600		ret = -ENOMEM;
 601		mlog_errno(ret);
 602		goto out_commit;
 603	}
 604
 605	new_bh = sb_getblk(inode->i_sb, first_blkno);
 606	if (!new_bh) {
 607		ret = -ENOMEM;
 608		mlog_errno(ret);
 609		goto out_commit;
 610	}
 611	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
 612
 613	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
 614				      OCFS2_JOURNAL_ACCESS_CREATE);
 615	if (ret) {
 616		mlog_errno(ret);
 617		goto out_commit;
 618	}
 619
 620	/* Initialize ocfs2_refcount_block. */
 621	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
 622	memset(rb, 0, inode->i_sb->s_blocksize);
 623	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
 624	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 625	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
 626	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 627	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
 628	rb->rf_blkno = cpu_to_le64(first_blkno);
 629	rb->rf_count = cpu_to_le32(1);
 630	rb->rf_records.rl_count =
 631			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
 632	spin_lock(&osb->osb_lock);
 633	rb->rf_generation = osb->s_next_generation++;
 634	spin_unlock(&osb->osb_lock);
 635
 636	ocfs2_journal_dirty(handle, new_bh);
 637
 638	spin_lock(&oi->ip_lock);
 639	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 640	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 641	di->i_refcount_loc = cpu_to_le64(first_blkno);
 642	spin_unlock(&oi->ip_lock);
 643
 644	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
 645
 646	ocfs2_journal_dirty(handle, di_bh);
 647
 648	/*
 649	 * We have to init the tree lock here since it will use
 650	 * the generation number to create it.
 651	 */
 652	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
 653	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
 654				      new_tree->rf_generation);
 655
 656	spin_lock(&osb->osb_lock);
 657	tree = ocfs2_find_refcount_tree(osb, first_blkno);
 658
 659	/*
 660	 * We've just created a new refcount tree in this block.  If
 661	 * we found a refcount tree on the ocfs2_super, it must be
 662	 * one we just deleted.  We free the old tree before
 663	 * inserting the new tree.
 664	 */
 665	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
 666	if (tree)
 667		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 668	ocfs2_insert_refcount_tree(osb, new_tree);
 669	spin_unlock(&osb->osb_lock);
 670	new_tree = NULL;
 671	if (tree)
 672		ocfs2_refcount_tree_put(tree);
 673
 674out_commit:
 675	ocfs2_commit_trans(osb, handle);
 676
 677out:
 678	if (new_tree) {
 679		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
 680		kfree(new_tree);
 681	}
 682
 683	brelse(new_bh);
 684	if (meta_ac)
 685		ocfs2_free_alloc_context(meta_ac);
 686
 687	return ret;
 688}
 689
 690static int ocfs2_set_refcount_tree(struct inode *inode,
 691				   struct buffer_head *di_bh,
 692				   u64 refcount_loc)
 693{
 694	int ret;
 695	handle_t *handle = NULL;
 696	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 697	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 698	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 699	struct buffer_head *ref_root_bh = NULL;
 700	struct ocfs2_refcount_block *rb;
 701	struct ocfs2_refcount_tree *ref_tree;
 702
 703	BUG_ON(ocfs2_is_refcount_inode(inode));
 704
 705	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
 706				       &ref_tree, &ref_root_bh);
 707	if (ret) {
 708		mlog_errno(ret);
 709		return ret;
 710	}
 711
 712	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
 713	if (IS_ERR(handle)) {
 714		ret = PTR_ERR(handle);
 715		mlog_errno(ret);
 716		goto out;
 717	}
 718
 719	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 720				      OCFS2_JOURNAL_ACCESS_WRITE);
 721	if (ret) {
 722		mlog_errno(ret);
 723		goto out_commit;
 724	}
 725
 726	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
 727				      OCFS2_JOURNAL_ACCESS_WRITE);
 728	if (ret) {
 729		mlog_errno(ret);
 730		goto out_commit;
 731	}
 732
 733	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 734	le32_add_cpu(&rb->rf_count, 1);
 735
 736	ocfs2_journal_dirty(handle, ref_root_bh);
 737
 738	spin_lock(&oi->ip_lock);
 739	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 740	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 741	di->i_refcount_loc = cpu_to_le64(refcount_loc);
 742	spin_unlock(&oi->ip_lock);
 743	ocfs2_journal_dirty(handle, di_bh);
 744
 745out_commit:
 746	ocfs2_commit_trans(osb, handle);
 747out:
 748	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 749	brelse(ref_root_bh);
 750
 751	return ret;
 752}
 753
 754int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
 755{
 756	int ret, delete_tree = 0;
 757	handle_t *handle = NULL;
 758	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 759	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 760	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 761	struct ocfs2_refcount_block *rb;
 762	struct inode *alloc_inode = NULL;
 763	struct buffer_head *alloc_bh = NULL;
 764	struct buffer_head *blk_bh = NULL;
 765	struct ocfs2_refcount_tree *ref_tree;
 766	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
 767	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
 768	u16 bit = 0;
 769
 770	if (!ocfs2_is_refcount_inode(inode))
 771		return 0;
 772
 773	BUG_ON(!ref_blkno);
 774	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
 775	if (ret) {
 776		mlog_errno(ret);
 777		return ret;
 778	}
 779
 780	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
 781
 782	/*
 783	 * If we are the last user, we need to free the block.
 784	 * So lock the allocator ahead.
 785	 */
 786	if (le32_to_cpu(rb->rf_count) == 1) {
 787		blk = le64_to_cpu(rb->rf_blkno);
 788		bit = le16_to_cpu(rb->rf_suballoc_bit);
 789		if (rb->rf_suballoc_loc)
 790			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
 791		else
 792			bg_blkno = ocfs2_which_suballoc_group(blk, bit);
 793
 794		alloc_inode = ocfs2_get_system_file_inode(osb,
 795					EXTENT_ALLOC_SYSTEM_INODE,
 796					le16_to_cpu(rb->rf_suballoc_slot));
 797		if (!alloc_inode) {
 798			ret = -ENOMEM;
 799			mlog_errno(ret);
 800			goto out;
 801		}
 802		inode_lock(alloc_inode);
 803
 804		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
 805		if (ret) {
 806			mlog_errno(ret);
 807			goto out_mutex;
 808		}
 809
 810		credits += OCFS2_SUBALLOC_FREE;
 811	}
 812
 813	handle = ocfs2_start_trans(osb, credits);
 814	if (IS_ERR(handle)) {
 815		ret = PTR_ERR(handle);
 816		mlog_errno(ret);
 817		goto out_unlock;
 818	}
 819
 820	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 821				      OCFS2_JOURNAL_ACCESS_WRITE);
 822	if (ret) {
 823		mlog_errno(ret);
 824		goto out_commit;
 825	}
 826
 827	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
 828				      OCFS2_JOURNAL_ACCESS_WRITE);
 829	if (ret) {
 830		mlog_errno(ret);
 831		goto out_commit;
 832	}
 833
 834	spin_lock(&oi->ip_lock);
 835	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
 836	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 837	di->i_refcount_loc = 0;
 838	spin_unlock(&oi->ip_lock);
 839	ocfs2_journal_dirty(handle, di_bh);
 840
 841	le32_add_cpu(&rb->rf_count , -1);
 842	ocfs2_journal_dirty(handle, blk_bh);
 843
 844	if (!rb->rf_count) {
 845		delete_tree = 1;
 846		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
 847		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
 848					       alloc_bh, bit, bg_blkno, 1);
 849		if (ret)
 850			mlog_errno(ret);
 851	}
 852
 853out_commit:
 854	ocfs2_commit_trans(osb, handle);
 855out_unlock:
 856	if (alloc_inode) {
 857		ocfs2_inode_unlock(alloc_inode, 1);
 858		brelse(alloc_bh);
 859	}
 860out_mutex:
 861	if (alloc_inode) {
 862		inode_unlock(alloc_inode);
 863		iput(alloc_inode);
 864	}
 865out:
 866	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 867	if (delete_tree)
 868		ocfs2_refcount_tree_put(ref_tree);
 869	brelse(blk_bh);
 870
 871	return ret;
 872}
 873
 874static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
 875					  struct buffer_head *ref_leaf_bh,
 876					  u64 cpos, unsigned int len,
 877					  struct ocfs2_refcount_rec *ret_rec,
 878					  int *index)
 879{
 880	int i = 0;
 881	struct ocfs2_refcount_block *rb =
 882		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
 883	struct ocfs2_refcount_rec *rec = NULL;
 884
 885	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
 886		rec = &rb->rf_records.rl_recs[i];
 887
 888		if (le64_to_cpu(rec->r_cpos) +
 889		    le32_to_cpu(rec->r_clusters) <= cpos)
 890			continue;
 891		else if (le64_to_cpu(rec->r_cpos) > cpos)
 892			break;
 893
 894		/* ok, cpos falls in this rec. Just return. */
 895		if (ret_rec)
 896			*ret_rec = *rec;
 897		goto out;
 898	}
 899
 900	if (ret_rec) {
 901		/* We hit a hole here, so fake the rec. */
 902		ret_rec->r_cpos = cpu_to_le64(cpos);
 903		ret_rec->r_refcount = 0;
 904		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
 905		    le64_to_cpu(rec->r_cpos) < cpos + len)
 906			ret_rec->r_clusters =
 907				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
 908		else
 909			ret_rec->r_clusters = cpu_to_le32(len);
 910	}
 911
 912out:
 913	*index = i;
 914}
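
/*
 * Editorial sketch (not from the original source): an example of the
 * hole-faking behaviour above.  Assume a leaf with two records given
 * as (r_cpos, r_clusters, r_refcount):
 *
 *	rl_recs[0] = (0, 10, 2)
 *	rl_recs[1] = (20, 5, 1)
 *
 * A lookup with cpos = 12, len = 10 misses both records.  Since the
 * next record starts at 20 < 12 + 10, the faked rec comes back as
 * (12, 8, 0), describing exactly the hole [12, 20), and *index points
 * at rl_recs[1].
 */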
 915
 916/*
 917 * Try to remove the refcount tree. The mechanism is:
 918 * 1) Check whether i_clusters == 0; if not, exit.
 919 * 2) Check whether we have an i_xattr_loc in the dinode; if so, exit.
 920 * 3) Check whether we have an inline xattr value stored outside; if so, exit.
 921 * 4) Remove the tree.
 922 */
 923int ocfs2_try_remove_refcount_tree(struct inode *inode,
 924				   struct buffer_head *di_bh)
 925{
 926	int ret;
 927	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 928	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 929
 930	down_write(&oi->ip_xattr_sem);
 931	down_write(&oi->ip_alloc_sem);
 932
 933	if (oi->ip_clusters)
 934		goto out;
 935
 936	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
 937		goto out;
 938
 939	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
 940	    ocfs2_has_inline_xattr_value_outside(inode, di))
 941		goto out;
 942
 943	ret = ocfs2_remove_refcount_tree(inode, di_bh);
 944	if (ret)
 945		mlog_errno(ret);
 946out:
 947	up_write(&oi->ip_alloc_sem);
 948	up_write(&oi->ip_xattr_sem);
 949	return 0;
 950}
 951
 952/*
 953 * Find the end range for a leaf refcount block indicated by
 954 * el->l_recs[index].e_blkno.
 955 */
 956static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
 957				       struct buffer_head *ref_root_bh,
 958				       struct ocfs2_extent_block *eb,
 959				       struct ocfs2_extent_list *el,
 960				       int index,  u32 *cpos_end)
 961{
 962	int ret, i, subtree_root;
 963	u32 cpos;
 964	u64 blkno;
 965	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
 966	struct ocfs2_path *left_path = NULL, *right_path = NULL;
 967	struct ocfs2_extent_tree et;
 968	struct ocfs2_extent_list *tmp_el;
 969
 970	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
 971		/*
 972		 * We have an extent rec after index, so just use the e_cpos
 973		 * of the next extent rec.
 974		 */
 975		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
 976		return 0;
 977	}
 978
 979	if (!eb || !eb->h_next_leaf_blk) {
 980		/*
 981		 * This is the last extent rec, so any higher cpos should
 982		 * be stored in this leaf refcount block.
 983		 */
 984		*cpos_end = UINT_MAX;
 985		return 0;
 986	}
 987
 988	/*
 989	 * If the extent block isn't the last one, we have to find
 990	 * the subtree root between this extent block and the next
 991	 * leaf extent block and get the corresponding e_cpos from
 992	 * the subtree root. Otherwise we may corrupt the b-tree.
 993	 */
 994	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
 995
 996	left_path = ocfs2_new_path_from_et(&et);
 997	if (!left_path) {
 998		ret = -ENOMEM;
 999		mlog_errno(ret);
1000		goto out;
1001	}
1002
1003	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
1004	ret = ocfs2_find_path(ci, left_path, cpos);
1005	if (ret) {
1006		mlog_errno(ret);
1007		goto out;
1008	}
1009
1010	right_path = ocfs2_new_path_from_path(left_path);
1011	if (!right_path) {
1012		ret = -ENOMEM;
1013		mlog_errno(ret);
1014		goto out;
1015	}
1016
1017	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
1018	if (ret) {
1019		mlog_errno(ret);
1020		goto out;
1021	}
1022
1023	ret = ocfs2_find_path(ci, right_path, cpos);
1024	if (ret) {
1025		mlog_errno(ret);
1026		goto out;
1027	}
1028
1029	subtree_root = ocfs2_find_subtree_root(&et, left_path,
1030					       right_path);
1031
1032	tmp_el = left_path->p_node[subtree_root].el;
1033	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
1034	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
1035		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
1036			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
1037			break;
1038		}
1039	}
1040
1041	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
1042
1043out:
1044	ocfs2_free_path(left_path);
1045	ocfs2_free_path(right_path);
1046	return ret;
1047}
1048
1049/*
1050 * Given a cpos and len, try to find the refcount record which contains cpos.
1051 * 1. If cpos can be found in one refcount record, return the record.
1052 * 2. If cpos can't be found, return a fake record which start from cpos
1053 *    and end at a small value between cpos+len and start of the next record.
1054 *    This fake record has r_refcount = 0.
1055 */
1056static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
1057				  struct buffer_head *ref_root_bh,
1058				  u64 cpos, unsigned int len,
1059				  struct ocfs2_refcount_rec *ret_rec,
1060				  int *index,
1061				  struct buffer_head **ret_bh)
1062{
1063	int ret = 0, i, found;
1064	u32 low_cpos, cpos_end;
1065	struct ocfs2_extent_list *el;
1066	struct ocfs2_extent_rec *rec = NULL;
1067	struct ocfs2_extent_block *eb = NULL;
1068	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
1069	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1070	struct ocfs2_refcount_block *rb =
1071			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1072
1073	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
1074		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
1075					      ret_rec, index);
1076		*ret_bh = ref_root_bh;
1077		get_bh(ref_root_bh);
1078		return 0;
1079	}
1080
1081	el = &rb->rf_list;
1082	low_cpos = cpos & OCFS2_32BIT_POS_MASK;
1083
1084	if (el->l_tree_depth) {
1085		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
1086		if (ret) {
1087			mlog_errno(ret);
1088			goto out;
1089		}
1090
1091		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
1092		el = &eb->h_list;
1093
1094		if (el->l_tree_depth) {
1095			ret = ocfs2_error(sb,
1096					  "refcount tree %llu has a non-zero tree depth in leaf btree block %llu\n",
1097					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
1098					  (unsigned long long)eb_bh->b_blocknr);
1099			goto out;
1100		}
1101	}
1102
1103	found = 0;
1104	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1105		rec = &el->l_recs[i];
1106
1107		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
1108			found = 1;
1109			break;
1110		}
1111	}
1112
1113	if (found) {
1114		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
1115						  eb, el, i, &cpos_end);
1116		if (ret) {
1117			mlog_errno(ret);
1118			goto out;
1119		}
1120
1121		if (cpos_end < low_cpos + len)
1122			len = cpos_end - low_cpos;
1123	}
1124
1125	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
1126					&ref_leaf_bh);
1127	if (ret) {
1128		mlog_errno(ret);
1129		goto out;
1130	}
1131
1132	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1133				      ret_rec, index);
1134	*ret_bh = ref_leaf_bh;
1135out:
1136	brelse(eb_bh);
1137	return ret;
1138}
1139
1140enum ocfs2_ref_rec_contig {
1141	REF_CONTIG_NONE = 0,
1142	REF_CONTIG_LEFT,
1143	REF_CONTIG_RIGHT,
1144	REF_CONTIG_LEFTRIGHT,
1145};
1146
1147static enum ocfs2_ref_rec_contig
1148	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
1149				    int index)
1150{
1151	if ((rb->rf_records.rl_recs[index].r_refcount ==
1152	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
1153	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
1154	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
1155	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
1156		return REF_CONTIG_RIGHT;
1157
1158	return REF_CONTIG_NONE;
1159}
1160
1161static enum ocfs2_ref_rec_contig
1162	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
1163				  int index)
1164{
1165	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
1166
1167	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
1168		ret = ocfs2_refcount_rec_adjacent(rb, index);
1169
1170	if (index > 0) {
1171		enum ocfs2_ref_rec_contig tmp;
1172
1173		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
1174
1175		if (tmp == REF_CONTIG_RIGHT) {
1176			if (ret == REF_CONTIG_RIGHT)
1177				ret = REF_CONTIG_LEFTRIGHT;
1178			else
1179				ret = REF_CONTIG_LEFT;
1180		}
1181	}
1182
1183	return ret;
1184}
1185
1186static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
1187					   int index)
1188{
1189	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
1190	       rb->rf_records.rl_recs[index+1].r_refcount);
1191
1192	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
1193		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
1194
1195	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
1196		memmove(&rb->rf_records.rl_recs[index + 1],
1197			&rb->rf_records.rl_recs[index + 2],
1198			sizeof(struct ocfs2_refcount_rec) *
1199			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
1200
1201	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
1202	       0, sizeof(struct ocfs2_refcount_rec));
1203	le16_add_cpu(&rb->rf_records.rl_used, -1);
1204}
1205
1206/*
1207 * Merge the refcount rec if we are contiguous with the adjacent recs.
1208 */
1209static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
1210				     int index)
1211{
1212	enum ocfs2_ref_rec_contig contig =
1213				ocfs2_refcount_rec_contig(rb, index);
1214
1215	if (contig == REF_CONTIG_NONE)
1216		return;
1217
1218	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
1219		BUG_ON(index == 0);
1220		index--;
1221	}
1222
1223	ocfs2_rotate_refcount_rec_left(rb, index);
1224
1225	if (contig == REF_CONTIG_LEFTRIGHT)
1226		ocfs2_rotate_refcount_rec_left(rb, index);
1227}
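
/*
 * Editorial sketch (not from the original source): a worked merge.
 * With three records carrying the same refcount and contiguous cpos,
 *
 *	rl_recs = { (0, 4, 2), (4, 4, 2), (8, 4, 2) },
 *
 * ocfs2_refcount_rec_merge(rb, 1) sees REF_CONTIG_LEFTRIGHT, steps
 * index back to 0 and rotates left twice, leaving the single record
 * (0, 12, 2) and rl_used reduced from 3 to 1.
 */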
1228
1229/*
1230 * Change the refcount indexed by "index" in ref_bh.
1231 * If refcount reaches 0, remove it.
1232 */
1233static int ocfs2_change_refcount_rec(handle_t *handle,
1234				     struct ocfs2_caching_info *ci,
1235				     struct buffer_head *ref_leaf_bh,
1236				     int index, int merge, int change)
1237{
1238	int ret;
1239	struct ocfs2_refcount_block *rb =
1240			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1241	struct ocfs2_refcount_list *rl = &rb->rf_records;
1242	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
1243
1244	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1245				      OCFS2_JOURNAL_ACCESS_WRITE);
1246	if (ret) {
1247		mlog_errno(ret);
1248		goto out;
1249	}
1250
1251	trace_ocfs2_change_refcount_rec(
1252		(unsigned long long)ocfs2_metadata_cache_owner(ci),
1253		index, le32_to_cpu(rec->r_refcount), change);
1254	le32_add_cpu(&rec->r_refcount, change);
1255
1256	if (!rec->r_refcount) {
1257		if (index != le16_to_cpu(rl->rl_used) - 1) {
1258			memmove(rec, rec + 1,
1259				(le16_to_cpu(rl->rl_used) - index - 1) *
1260				sizeof(struct ocfs2_refcount_rec));
1261			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
1262			       0, sizeof(struct ocfs2_refcount_rec));
1263		}
1264
1265		le16_add_cpu(&rl->rl_used, -1);
1266	} else if (merge)
1267		ocfs2_refcount_rec_merge(rb, index);
1268
1269	ocfs2_journal_dirty(handle, ref_leaf_bh);
1270out:
1271	return ret;
1272}
1273
1274static int ocfs2_expand_inline_ref_root(handle_t *handle,
1275					struct ocfs2_caching_info *ci,
1276					struct buffer_head *ref_root_bh,
1277					struct buffer_head **ref_leaf_bh,
1278					struct ocfs2_alloc_context *meta_ac)
1279{
1280	int ret;
1281	u16 suballoc_bit_start;
1282	u32 num_got;
1283	u64 suballoc_loc, blkno;
1284	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1285	struct buffer_head *new_bh = NULL;
1286	struct ocfs2_refcount_block *new_rb;
1287	struct ocfs2_refcount_block *root_rb =
1288			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1289
1290	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1291				      OCFS2_JOURNAL_ACCESS_WRITE);
1292	if (ret) {
1293		mlog_errno(ret);
1294		goto out;
1295	}
1296
1297	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1298				   &suballoc_bit_start, &num_got,
1299				   &blkno);
1300	if (ret) {
1301		mlog_errno(ret);
1302		goto out;
1303	}
1304
1305	new_bh = sb_getblk(sb, blkno);
1306	if (new_bh == NULL) {
1307		ret = -ENOMEM;
1308		mlog_errno(ret);
1309		goto out;
1310	}
1311	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1312
1313	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1314				      OCFS2_JOURNAL_ACCESS_CREATE);
1315	if (ret) {
1316		mlog_errno(ret);
1317		goto out;
1318	}
1319
1320	/*
1321	 * Initialize ocfs2_refcount_block.
1322	 * It should contain the same information as the old root,
1323	 * so just memcpy it and change the corresponding fields.
1324	 */
1325	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
1326
1327	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1328	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1329	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1330	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1331	new_rb->rf_blkno = cpu_to_le64(blkno);
1332	new_rb->rf_cpos = cpu_to_le32(0);
1333	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1334	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1335	ocfs2_journal_dirty(handle, new_bh);
1336
1337	/* Now change the root. */
1338	memset(&root_rb->rf_list, 0, sb->s_blocksize -
1339	       offsetof(struct ocfs2_refcount_block, rf_list));
1340	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
1341	root_rb->rf_clusters = cpu_to_le32(1);
1342	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
1343	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
1344	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
1345	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
1346
1347	ocfs2_journal_dirty(handle, ref_root_bh);
1348
1349	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
1350		le16_to_cpu(new_rb->rf_records.rl_used));
1351
1352	*ref_leaf_bh = new_bh;
1353	new_bh = NULL;
1354out:
1355	brelse(new_bh);
1356	return ret;
1357}
1358
1359static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
1360					   struct ocfs2_refcount_rec *next)
1361{
1362	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
1363		ocfs2_get_ref_rec_low_cpos(next))
1364		return 1;
1365
1366	return 0;
1367}
1368
1369static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
1370{
1371	const struct ocfs2_refcount_rec *l = a, *r = b;
1372	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
1373	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
1374
1375	if (l_cpos > r_cpos)
1376		return 1;
1377	if (l_cpos < r_cpos)
1378		return -1;
1379	return 0;
1380}
1381
1382static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
1383{
1384	const struct ocfs2_refcount_rec *l = a, *r = b;
1385	u64 l_cpos = le64_to_cpu(l->r_cpos);
1386	u64 r_cpos = le64_to_cpu(r->r_cpos);
1387
1388	if (l_cpos > r_cpos)
1389		return 1;
1390	if (l_cpos < r_cpos)
1391		return -1;
1392	return 0;
1393}
1394
1395static void swap_refcount_rec(void *a, void *b, int size)
1396{
1397	struct ocfs2_refcount_rec *l = a, *r = b;
1398
1399	swap(*l, *r);
1400}
1401
1402/*
1403 * The refcount recs are ordered by their 64-bit cpos,
1404 * but we will use the low 32 bits as the e_cpos in the b-tree.
1405 * So we need to make sure that the split pos doesn't intersect with others.
1406 *
1407 * Note: the refcount recs are already sorted by their low 32-bit cpos,
1408 *       so just try the middle pos first; we will exit as soon as we find
1409 *       a good position.
1410 */
1411static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
1412					 u32 *split_pos, int *split_index)
1413{
1414	int num_used = le16_to_cpu(rl->rl_used);
1415	int delta, middle = num_used / 2;
1416
1417	for (delta = 0; delta < middle; delta++) {
1418		/* Let's check delta earlier than middle */
1419		if (ocfs2_refcount_rec_no_intersect(
1420					&rl->rl_recs[middle - delta - 1],
1421					&rl->rl_recs[middle - delta])) {
1422			*split_index = middle - delta;
1423			break;
1424		}
1425
1426		/* For even counts, don't walk off the end */
1427		if ((middle + delta + 1) == num_used)
1428			continue;
1429
1430		/* Now try delta past middle */
1431		if (ocfs2_refcount_rec_no_intersect(
1432					&rl->rl_recs[middle + delta],
1433					&rl->rl_recs[middle + delta + 1])) {
1434			*split_index = middle + delta + 1;
1435			break;
1436		}
1437	}
1438
1439	if (delta >= middle)
1440		return -ENOSPC;
1441
1442	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
1443	return 0;
1444}
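
/*
 * Editorial sketch (not from the original source): with rl already
 * sorted by low 32-bit cpos,
 *
 *	rl_recs = { (0, 4), (4, 4), (8, 4), (12, 4) }	(cpos, clusters)
 *
 * num_used = 4 and middle = 2.  At delta = 0, recs[1] ends at 8 ==
 * low cpos of recs[2], so the pair doesn't intersect: *split_index = 2
 * and *split_pos = 8.  Only if no non-intersecting boundary exists on
 * either side of the middle do we return -ENOSPC.
 */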
1445
1446static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1447					    struct buffer_head *new_bh,
1448					    u32 *split_cpos)
1449{
1450	int split_index = 0, num_moved, ret;
1451	u32 cpos = 0;
1452	struct ocfs2_refcount_block *rb =
1453			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1454	struct ocfs2_refcount_list *rl = &rb->rf_records;
1455	struct ocfs2_refcount_block *new_rb =
1456			(struct ocfs2_refcount_block *)new_bh->b_data;
1457	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1458
1459	trace_ocfs2_divide_leaf_refcount_block(
1460		(unsigned long long)ref_leaf_bh->b_blocknr,
1461		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
1462
1463	/*
1464	 * XXX: Improvement later.
1465	 * If we know all the high 32 bits of the cpos are the same, no need to sort.
1466	 *
1467	 * In order to make the whole process safe, we do:
1468	 * 1. sort the entries by their low 32 bit cpos first so that we can
1469	 *    find the split cpos easily.
1470	 * 2. call ocfs2_insert_extent to insert the new refcount block.
1471	 * 3. move the refcount rec to the new block.
1472	 * 4. sort the entries by their 64 bit cpos.
1473	 * 5. dirty the new_rb and rb.
1474	 */
1475	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1476	     sizeof(struct ocfs2_refcount_rec),
1477	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
1478
1479	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1480	if (ret) {
1481		mlog_errno(ret);
1482		return ret;
1483	}
1484
1485	new_rb->rf_cpos = cpu_to_le32(cpos);
1486
1487	/* move refcount records starting from split_index to the new block. */
1488	num_moved = le16_to_cpu(rl->rl_used) - split_index;
1489	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
1490	       num_moved * sizeof(struct ocfs2_refcount_rec));
1491
1492	/* OK, remove the entries we just moved over to the other block. */
1493	memset(&rl->rl_recs[split_index], 0,
1494	       num_moved * sizeof(struct ocfs2_refcount_rec));
1495
1496	/* change old and new rl_used accordingly. */
1497	le16_add_cpu(&rl->rl_used, -num_moved);
1498	new_rl->rl_used = cpu_to_le16(num_moved);
1499
1500	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1501	     sizeof(struct ocfs2_refcount_rec),
1502	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1503
1504	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
1505	     sizeof(struct ocfs2_refcount_rec),
1506	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1507
1508	*split_cpos = cpos;
1509	return 0;
1510}
1511
1512static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1513					 struct ocfs2_caching_info *ci,
1514					 struct buffer_head *ref_root_bh,
1515					 struct buffer_head *ref_leaf_bh,
1516					 struct ocfs2_alloc_context *meta_ac)
1517{
1518	int ret;
1519	u16 suballoc_bit_start;
1520	u32 num_got, new_cpos;
1521	u64 suballoc_loc, blkno;
1522	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1523	struct ocfs2_refcount_block *root_rb =
1524			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1525	struct buffer_head *new_bh = NULL;
1526	struct ocfs2_refcount_block *new_rb;
1527	struct ocfs2_extent_tree ref_et;
1528
1529	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
1530
1531	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1532				      OCFS2_JOURNAL_ACCESS_WRITE);
1533	if (ret) {
1534		mlog_errno(ret);
1535		goto out;
1536	}
1537
1538	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1539				      OCFS2_JOURNAL_ACCESS_WRITE);
1540	if (ret) {
1541		mlog_errno(ret);
1542		goto out;
1543	}
1544
1545	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1546				   &suballoc_bit_start, &num_got,
1547				   &blkno);
1548	if (ret) {
1549		mlog_errno(ret);
1550		goto out;
1551	}
1552
1553	new_bh = sb_getblk(sb, blkno);
1554	if (new_bh == NULL) {
1555		ret = -ENOMEM;
1556		mlog_errno(ret);
1557		goto out;
1558	}
1559	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1560
1561	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1562				      OCFS2_JOURNAL_ACCESS_CREATE);
1563	if (ret) {
1564		mlog_errno(ret);
1565		goto out;
1566	}
1567
1568	/* Initialize ocfs2_refcount_block. */
1569	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1570	memset(new_rb, 0, sb->s_blocksize);
1571	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
1572	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1573	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1574	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1575	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
1576	new_rb->rf_blkno = cpu_to_le64(blkno);
1577	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1578	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1579	new_rb->rf_records.rl_count =
1580				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
1581	new_rb->rf_generation = root_rb->rf_generation;
1582
1583	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
1584	if (ret) {
1585		mlog_errno(ret);
1586		goto out;
1587	}
1588
1589	ocfs2_journal_dirty(handle, ref_leaf_bh);
1590	ocfs2_journal_dirty(handle, new_bh);
1591
1592	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1593
1594	trace_ocfs2_new_leaf_refcount_block(
1595			(unsigned long long)new_bh->b_blocknr, new_cpos);
1596
1597	/* Insert the new leaf block with the specific offset cpos. */
1598	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1599				  1, 0, meta_ac);
1600	if (ret)
1601		mlog_errno(ret);
1602
1603out:
1604	brelse(new_bh);
1605	return ret;
1606}
1607
1608static int ocfs2_expand_refcount_tree(handle_t *handle,
1609				      struct ocfs2_caching_info *ci,
1610				      struct buffer_head *ref_root_bh,
1611				      struct buffer_head *ref_leaf_bh,
1612				      struct ocfs2_alloc_context *meta_ac)
1613{
1614	int ret;
1615	struct buffer_head *expand_bh = NULL;
1616
1617	if (ref_root_bh == ref_leaf_bh) {
1618		/*
1619		 * the old root bh hasn't been expanded to a b-tree,
1620		 * so expand it first.
1621		 */
1622		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
1623						   &expand_bh, meta_ac);
1624		if (ret) {
1625			mlog_errno(ret);
1626			goto out;
1627		}
1628	} else {
1629		expand_bh = ref_leaf_bh;
1630		get_bh(expand_bh);
1631	}
1632
1633
1634	/* Now add a new refcount block into the tree. */
1635	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
1636					    expand_bh, meta_ac);
1637	if (ret)
1638		mlog_errno(ret);
1639out:
1640	brelse(expand_bh);
1641	return ret;
1642}
1643
1644/*
1645 * Adjust the extent rec in b-tree representing ref_leaf_bh.
1646 *
1647 * Only called when we have inserted a new refcount rec at index 0
1648 * which means ocfs2_extent_rec.e_cpos may need some change.
1649 */
1650static int ocfs2_adjust_refcount_rec(handle_t *handle,
1651				     struct ocfs2_caching_info *ci,
1652				     struct buffer_head *ref_root_bh,
1653				     struct buffer_head *ref_leaf_bh,
1654				     struct ocfs2_refcount_rec *rec)
1655{
1656	int ret = 0, i;
1657	u32 new_cpos, old_cpos;
1658	struct ocfs2_path *path = NULL;
1659	struct ocfs2_extent_tree et;
1660	struct ocfs2_refcount_block *rb =
1661		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1662	struct ocfs2_extent_list *el;
1663
1664	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
1665		goto out;
1666
1667	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1668	old_cpos = le32_to_cpu(rb->rf_cpos);
1669	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
1670	if (old_cpos <= new_cpos)
1671		goto out;
1672
1673	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1674
1675	path = ocfs2_new_path_from_et(&et);
1676	if (!path) {
1677		ret = -ENOMEM;
1678		mlog_errno(ret);
1679		goto out;
1680	}
1681
1682	ret = ocfs2_find_path(ci, path, old_cpos);
1683	if (ret) {
1684		mlog_errno(ret);
1685		goto out;
1686	}
1687
1688	/*
1689	 * 2 more credits, one for the leaf refcount block, one for
1690	 * the extent block that contains the extent rec.
1691	 */
1692	ret = ocfs2_extend_trans(handle, 2);
1693	if (ret < 0) {
1694		mlog_errno(ret);
1695		goto out;
1696	}
1697
1698	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1699				      OCFS2_JOURNAL_ACCESS_WRITE);
1700	if (ret < 0) {
1701		mlog_errno(ret);
1702		goto out;
1703	}
1704
1705	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
1706				      OCFS2_JOURNAL_ACCESS_WRITE);
1707	if (ret < 0) {
1708		mlog_errno(ret);
1709		goto out;
1710	}
1711
1712	/* change the leaf extent block first. */
1713	el = path_leaf_el(path);
1714
1715	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
1716		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
1717			break;
1718
1719	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
1720
1721	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
1722
1723	/* change the r_cpos in the leaf block. */
1724	rb->rf_cpos = cpu_to_le32(new_cpos);
1725
1726	ocfs2_journal_dirty(handle, path_leaf_bh(path));
1727	ocfs2_journal_dirty(handle, ref_leaf_bh);
1728
1729out:
1730	ocfs2_free_path(path);
1731	return ret;
1732}
1733
1734static int ocfs2_insert_refcount_rec(handle_t *handle,
1735				     struct ocfs2_caching_info *ci,
1736				     struct buffer_head *ref_root_bh,
1737				     struct buffer_head *ref_leaf_bh,
1738				     struct ocfs2_refcount_rec *rec,
1739				     int index, int merge,
1740				     struct ocfs2_alloc_context *meta_ac)
1741{
1742	int ret;
1743	struct ocfs2_refcount_block *rb =
1744			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1745	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1746	struct buffer_head *new_bh = NULL;
1747
1748	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1749
1750	if (rf_list->rl_used == rf_list->rl_count) {
1751		u64 cpos = le64_to_cpu(rec->r_cpos);
1752		u32 len = le32_to_cpu(rec->r_clusters);
1753
1754		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1755						 ref_leaf_bh, meta_ac);
1756		if (ret) {
1757			mlog_errno(ret);
1758			goto out;
1759		}
1760
1761		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1762					     cpos, len, NULL, &index,
1763					     &new_bh);
1764		if (ret) {
1765			mlog_errno(ret);
1766			goto out;
1767		}
1768
1769		ref_leaf_bh = new_bh;
1770		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1771		rf_list = &rb->rf_records;
1772	}
1773
1774	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1775				      OCFS2_JOURNAL_ACCESS_WRITE);
1776	if (ret) {
1777		mlog_errno(ret);
1778		goto out;
1779	}
1780
1781	if (index < le16_to_cpu(rf_list->rl_used))
1782		memmove(&rf_list->rl_recs[index + 1],
1783			&rf_list->rl_recs[index],
1784			(le16_to_cpu(rf_list->rl_used) - index) *
1785			 sizeof(struct ocfs2_refcount_rec));
1786
1787	trace_ocfs2_insert_refcount_rec(
1788		(unsigned long long)ref_leaf_bh->b_blocknr, index,
1789		(unsigned long long)le64_to_cpu(rec->r_cpos),
1790		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
1791
1792	rf_list->rl_recs[index] = *rec;
1793
1794	le16_add_cpu(&rf_list->rl_used, 1);
1795
1796	if (merge)
1797		ocfs2_refcount_rec_merge(rb, index);
1798
1799	ocfs2_journal_dirty(handle, ref_leaf_bh);
1800
1801	if (index == 0) {
1802		ret = ocfs2_adjust_refcount_rec(handle, ci,
1803						ref_root_bh,
1804						ref_leaf_bh, rec);
1805		if (ret)
1806			mlog_errno(ret);
1807	}
1808out:
1809	brelse(new_bh);
1810	return ret;
1811}
1812
1813/*
1814 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1815 * This is much simpler than our b-tree code.
1816 * split_rec is the new refcount rec we want to insert.
1817 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1818 * increase the refcount or decrease a refcount to a non-zero value).
1819 * If split_rec->r_refcount == 0, we are punching a hole in the current
1820 * refcount rec (in case we decrease a refcount to zero).
1821 */
1822static int ocfs2_split_refcount_rec(handle_t *handle,
1823				    struct ocfs2_caching_info *ci,
1824				    struct buffer_head *ref_root_bh,
1825				    struct buffer_head *ref_leaf_bh,
1826				    struct ocfs2_refcount_rec *split_rec,
1827				    int index, int merge,
1828				    struct ocfs2_alloc_context *meta_ac,
1829				    struct ocfs2_cached_dealloc_ctxt *dealloc)
1830{
1831	int ret, recs_need;
1832	u32 len;
1833	struct ocfs2_refcount_block *rb =
1834			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1835	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1836	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1837	struct ocfs2_refcount_rec *tail_rec = NULL;
1838	struct buffer_head *new_bh = NULL;
1839
1840	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1841
1842	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
1843		le32_to_cpu(orig_rec->r_clusters),
1844		le32_to_cpu(orig_rec->r_refcount),
1845		le64_to_cpu(split_rec->r_cpos),
1846		le32_to_cpu(split_rec->r_clusters),
1847		le32_to_cpu(split_rec->r_refcount));
1848
1849	/*
1850	 * If we just need to split the header or tail clusters,
1851	 * no new recs are needed; a simple split is OK.
1852	 * Otherwise we need at least one new rec.
1853	 */
1854	if (!split_rec->r_refcount &&
1855	    (split_rec->r_cpos == orig_rec->r_cpos ||
1856	     le64_to_cpu(split_rec->r_cpos) +
1857	     le32_to_cpu(split_rec->r_clusters) ==
1858	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1859		recs_need = 0;
1860	else
1861		recs_need = 1;
1862
1863	/*
1864	 * We need one more rec if we split in the middle and the new rec has
1865	 * some refcount in it.
1866	 */
1867	if (split_rec->r_refcount &&
1868	    (split_rec->r_cpos != orig_rec->r_cpos &&
1869	     le64_to_cpu(split_rec->r_cpos) +
1870	     le32_to_cpu(split_rec->r_clusters) !=
1871	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1872		recs_need++;
1873
1874	/* If the leaf block doesn't have enough records, expand it. */
1875	if (le16_to_cpu(rf_list->rl_used) + recs_need >
1876					 le16_to_cpu(rf_list->rl_count)) {
1877		struct ocfs2_refcount_rec tmp_rec;
1878		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1879		len = le32_to_cpu(orig_rec->r_clusters);
1880		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1881						 ref_leaf_bh, meta_ac);
1882		if (ret) {
1883			mlog_errno(ret);
1884			goto out;
1885		}
1886
1887		/*
1888		 * We have to re-get it since the cpos may have moved to
1889		 * another leaf block.
1890		 */
1891		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1892					     cpos, len, &tmp_rec, &index,
1893					     &new_bh);
1894		if (ret) {
1895			mlog_errno(ret);
1896			goto out;
1897		}
1898
1899		ref_leaf_bh = new_bh;
1900		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1901		rf_list = &rb->rf_records;
1902		orig_rec = &rf_list->rl_recs[index];
1903	}
1904
1905	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1906				      OCFS2_JOURNAL_ACCESS_WRITE);
1907	if (ret) {
1908		mlog_errno(ret);
1909		goto out;
1910	}
1911
1912	/*
1913	 * We have calculated how many new records we need and stored the
1914	 * count in recs_need, so make enough space first by moving the records
1915	 * after "index" to the end.
1916	 */
1917	if (index != le16_to_cpu(rf_list->rl_used) - 1)
1918		memmove(&rf_list->rl_recs[index + 1 + recs_need],
1919			&rf_list->rl_recs[index + 1],
1920			(le16_to_cpu(rf_list->rl_used) - index - 1) *
1921			 sizeof(struct ocfs2_refcount_rec));
1922
1923	len = (le64_to_cpu(orig_rec->r_cpos) +
1924	      le32_to_cpu(orig_rec->r_clusters)) -
1925	      (le64_to_cpu(split_rec->r_cpos) +
1926	      le32_to_cpu(split_rec->r_clusters));
1927
1928	/*
1929	 * If we have "len", then we will split off the tail and move it
1930	 * to the end of the space we have just spared.
1931	 */
1932	if (len) {
1933		tail_rec = &rf_list->rl_recs[index + recs_need];
1934
1935		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1936		le64_add_cpu(&tail_rec->r_cpos,
1937			     le32_to_cpu(tail_rec->r_clusters) - len);
1938		tail_rec->r_clusters = cpu_to_le32(len);
1939	}
1940
1941	/*
1942	 * If the split pos isn't the same as the original one, we need to
1943	 * split in the head.
1944	 *
1945	 * Note: it is possible that split_rec.r_refcount == 0,
1946	 * recs_need == 0 and len > 0, which means we just cut the head from
1947	 * the orig_rec. In that case orig_rec was already modified above,
1948	 * so the r_cpos check alone would be misleading.
1949	 */
1950	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1951		len = le64_to_cpu(split_rec->r_cpos) -
1952		      le64_to_cpu(orig_rec->r_cpos);
1953		orig_rec->r_clusters = cpu_to_le32(len);
1954		index++;
1955	}
1956
1957	le16_add_cpu(&rf_list->rl_used, recs_need);
1958
1959	if (split_rec->r_refcount) {
1960		rf_list->rl_recs[index] = *split_rec;
1961		trace_ocfs2_split_refcount_rec_insert(
1962			(unsigned long long)ref_leaf_bh->b_blocknr, index,
1963			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
1964			le32_to_cpu(split_rec->r_clusters),
1965			le32_to_cpu(split_rec->r_refcount));
1966
1967		if (merge)
1968			ocfs2_refcount_rec_merge(rb, index);
1969	}
1970
1971	ocfs2_journal_dirty(handle, ref_leaf_bh);
1972
1973out:
1974	brelse(new_bh);
1975	return ret;
1976}
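
/*
 * Editorial sketch (not from the original source): a middle split.
 * With orig_rec = (0, 10, 2) and split_rec = (3, 4, 3), i.e. bumping
 * the refcount of clusters [3, 7), recs_need = 2: the tail (7, 3, 2)
 * is copied out to index + 2, the head is trimmed to (0, 3, 2), and
 * split_rec lands in between, so one record becomes three.
 */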
1977
1978static int __ocfs2_increase_refcount(handle_t *handle,
1979				     struct ocfs2_caching_info *ci,
1980				     struct buffer_head *ref_root_bh,
1981				     u64 cpos, u32 len, int merge,
1982				     struct ocfs2_alloc_context *meta_ac,
1983				     struct ocfs2_cached_dealloc_ctxt *dealloc)
1984{
1985	int ret = 0, index;
1986	struct buffer_head *ref_leaf_bh = NULL;
1987	struct ocfs2_refcount_rec rec;
1988	unsigned int set_len = 0;
1989
1990	trace_ocfs2_increase_refcount_begin(
1991	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
1992	     (unsigned long long)cpos, len);
1993
1994	while (len) {
1995		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1996					     cpos, len, &rec, &index,
1997					     &ref_leaf_bh);
1998		if (ret) {
1999			mlog_errno(ret);
2000			goto out;
2001		}
2002
2003		set_len = le32_to_cpu(rec.r_clusters);
2004
2005		/*
2006		 * Here we may encounter 3 situations:
2007		 *
2008		 * 1. If we find an already existing record and the length
2009		 *    matches, we just need to increase the r_refcount
2010		 *    and we are done.
2011		 * 2. If we find a hole, just insert a rec with r_refcount = 1.
2012		 * 3. If we are in the middle of one extent record, split
2013		 *    it.
2014		 */
2015		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2016		    set_len <= len) {
2017			trace_ocfs2_increase_refcount_change(
2018				(unsigned long long)cpos, set_len,
2019				le32_to_cpu(rec.r_refcount));
2020			ret = ocfs2_change_refcount_rec(handle, ci,
2021							ref_leaf_bh, index,
2022							merge, 1);
2023			if (ret) {
2024				mlog_errno(ret);
2025				goto out;
2026			}
2027		} else if (!rec.r_refcount) {
2028			rec.r_refcount = cpu_to_le32(1);
2029
2030			trace_ocfs2_increase_refcount_insert(
2031			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2032			     set_len);
2033			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
2034							ref_leaf_bh,
2035							&rec, index,
2036							merge, meta_ac);
2037			if (ret) {
2038				mlog_errno(ret);
2039				goto out;
2040			}
2041		} else  {
2042			set_len = min((u64)(cpos + len),
2043				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
2044			rec.r_cpos = cpu_to_le64(cpos);
2045			rec.r_clusters = cpu_to_le32(set_len);
2046			le32_add_cpu(&rec.r_refcount, 1);
2047
2048			trace_ocfs2_increase_refcount_split(
2049			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2050			     set_len, le32_to_cpu(rec.r_refcount));
2051			ret = ocfs2_split_refcount_rec(handle, ci,
2052						       ref_root_bh, ref_leaf_bh,
2053						       &rec, index, merge,
2054						       meta_ac, dealloc);
2055			if (ret) {
2056				mlog_errno(ret);
2057				goto out;
2058			}
2059		}
2060
2061		cpos += set_len;
2062		len -= set_len;
2063		brelse(ref_leaf_bh);
2064		ref_leaf_bh = NULL;
2065	}
2066
2067out:
2068	brelse(ref_leaf_bh);
2069	return ret;
2070}
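
/*
 * Editorial sketch (not from the original source): increasing the
 * refcount of [5, 25) against a single record (0, 30, 1) takes one
 * pass through the loop.  Case 3 applies, so set_len becomes 20 and
 * the split leaves (0, 5, 1), (5, 20, 2), (25, 5, 1).  Had [5, 25)
 * been a hole instead, case 2 would insert (5, 20, 1) directly.
 */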
2071
2072static int ocfs2_remove_refcount_extent(handle_t *handle,
2073				struct ocfs2_caching_info *ci,
2074				struct buffer_head *ref_root_bh,
2075				struct buffer_head *ref_leaf_bh,
2076				struct ocfs2_alloc_context *meta_ac,
2077				struct ocfs2_cached_dealloc_ctxt *dealloc)
2078{
2079	int ret;
2080	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2081	struct ocfs2_refcount_block *rb =
2082			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2083	struct ocfs2_extent_tree et;
2084
2085	BUG_ON(rb->rf_records.rl_used);
2086
2087	trace_ocfs2_remove_refcount_extent(
2088		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2089		(unsigned long long)ref_leaf_bh->b_blocknr,
2090		le32_to_cpu(rb->rf_cpos));
2091
2092	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2093	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
2094				  1, meta_ac, dealloc);
2095	if (ret) {
2096		mlog_errno(ret);
2097		goto out;
2098	}
2099
2100	ocfs2_remove_from_cache(ci, ref_leaf_bh);
2101
2102	/*
2103	 * Add the freed block to the dealloc context so that it will be
2104	 * freed when we run the dealloc.
2105	 */
2106	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
2107					le16_to_cpu(rb->rf_suballoc_slot),
2108					le64_to_cpu(rb->rf_suballoc_loc),
2109					le64_to_cpu(rb->rf_blkno),
2110					le16_to_cpu(rb->rf_suballoc_bit));
2111	if (ret) {
2112		mlog_errno(ret);
2113		goto out;
2114	}
2115
2116	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
2117				      OCFS2_JOURNAL_ACCESS_WRITE);
2118	if (ret) {
2119		mlog_errno(ret);
2120		goto out;
2121	}
2122
2123	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2124
2125	le32_add_cpu(&rb->rf_clusters, -1);
2126
2127	/*
2128	 * Check whether we need to restore the root refcount block if
2129	 * there is no leaf extent block at all.
2130	 */
2131	if (!rb->rf_list.l_next_free_rec) {
2132		BUG_ON(rb->rf_clusters);
2133
2134		trace_ocfs2_restore_refcount_block(
2135		     (unsigned long long)ref_root_bh->b_blocknr);
2136
2137		rb->rf_flags = 0;
2138		rb->rf_parent = 0;
2139		rb->rf_cpos = 0;
2140		memset(&rb->rf_records, 0, sb->s_blocksize -
2141		       offsetof(struct ocfs2_refcount_block, rf_records));
2142		rb->rf_records.rl_count =
2143				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2144	}
2145
2146	ocfs2_journal_dirty(handle, ref_root_bh);
2147
2148out:
2149	return ret;
2150}
2151
2152int ocfs2_increase_refcount(handle_t *handle,
2153			    struct ocfs2_caching_info *ci,
2154			    struct buffer_head *ref_root_bh,
2155			    u64 cpos, u32 len,
2156			    struct ocfs2_alloc_context *meta_ac,
2157			    struct ocfs2_cached_dealloc_ctxt *dealloc)
2158{
2159	return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
2160					 cpos, len, 1,
2161					 meta_ac, dealloc);
2162}
2163
2164static int ocfs2_decrease_refcount_rec(handle_t *handle,
2165				struct ocfs2_caching_info *ci,
2166				struct buffer_head *ref_root_bh,
2167				struct buffer_head *ref_leaf_bh,
2168				int index, u64 cpos, unsigned int len,
2169				struct ocfs2_alloc_context *meta_ac,
2170				struct ocfs2_cached_dealloc_ctxt *dealloc)
2171{
2172	int ret;
2173	struct ocfs2_refcount_block *rb =
2174			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2175	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2176
2177	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2178	BUG_ON(cpos + len >
2179	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2180
2181	trace_ocfs2_decrease_refcount_rec(
2182		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2183		(unsigned long long)cpos, len);
2184
2185	if (cpos == le64_to_cpu(rec->r_cpos) &&
2186	    len == le32_to_cpu(rec->r_clusters))
2187		ret = ocfs2_change_refcount_rec(handle, ci,
2188						ref_leaf_bh, index, 1, -1);
2189	else {
2190		struct ocfs2_refcount_rec split = *rec;
2191		split.r_cpos = cpu_to_le64(cpos);
2192		split.r_clusters = cpu_to_le32(len);
2193
2194		le32_add_cpu(&split.r_refcount, -1);
2195
2196		ret = ocfs2_split_refcount_rec(handle, ci,
2197					       ref_root_bh, ref_leaf_bh,
2198					       &split, index, 1,
2199					       meta_ac, dealloc);
2200	}
2201
2202	if (ret) {
2203		mlog_errno(ret);
2204		goto out;
2205	}
2206
2207	/* Remove the leaf refcount block if it contains no refcount record. */
2208	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2209		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2210						   ref_leaf_bh, meta_ac,
2211						   dealloc);
2212		if (ret)
2213			mlog_errno(ret);
2214	}
2215
2216out:
2217	return ret;
2218}
2219
2220static int __ocfs2_decrease_refcount(handle_t *handle,
2221				     struct ocfs2_caching_info *ci,
2222				     struct buffer_head *ref_root_bh,
2223				     u64 cpos, u32 len,
2224				     struct ocfs2_alloc_context *meta_ac,
2225				     struct ocfs2_cached_dealloc_ctxt *dealloc,
2226				     int delete)
2227{
2228	int ret = 0, index = 0;
2229	struct ocfs2_refcount_rec rec;
2230	unsigned int r_count = 0, r_len;
2231	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2232	struct buffer_head *ref_leaf_bh = NULL;
2233
2234	trace_ocfs2_decrease_refcount(
2235		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2236		(unsigned long long)cpos, len, delete);
2237
2238	while (len) {
2239		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2240					     cpos, len, &rec, &index,
2241					     &ref_leaf_bh);
2242		if (ret) {
2243			mlog_errno(ret);
2244			goto out;
2245		}
2246
2247		r_count = le32_to_cpu(rec.r_refcount);
2248		BUG_ON(r_count == 0);
2249		if (!delete)
2250			BUG_ON(r_count > 1);
2251
2252		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2253			      le32_to_cpu(rec.r_clusters)) - cpos;
2254
2255		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2256						  ref_leaf_bh, index,
2257						  cpos, r_len,
2258						  meta_ac, dealloc);
2259		if (ret) {
2260			mlog_errno(ret);
2261			goto out;
2262		}
2263
2264		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
2265			ret = ocfs2_cache_cluster_dealloc(dealloc,
2266					  ocfs2_clusters_to_blocks(sb, cpos),
2267							  r_len);
2268			if (ret) {
2269				mlog_errno(ret);
2270				goto out;
2271			}
2272		}
2273
2274		cpos += r_len;
2275		len -= r_len;
2276		brelse(ref_leaf_bh);
2277		ref_leaf_bh = NULL;
2278	}
2279
2280out:
2281	brelse(ref_leaf_bh);
2282	return ret;
2283}
2284
2285/* Caller must hold refcount tree lock. */
2286int ocfs2_decrease_refcount(struct inode *inode,
2287			    handle_t *handle, u32 cpos, u32 len,
2288			    struct ocfs2_alloc_context *meta_ac,
2289			    struct ocfs2_cached_dealloc_ctxt *dealloc,
2290			    int delete)
2291{
2292	int ret;
2293	u64 ref_blkno;
2294	struct buffer_head *ref_root_bh = NULL;
2295	struct ocfs2_refcount_tree *tree;
2296
2297	BUG_ON(!ocfs2_is_refcount_inode(inode));
2298
2299	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2300	if (ret) {
2301		mlog_errno(ret);
2302		goto out;
2303	}
2304
2305	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2306	if (ret) {
2307		mlog_errno(ret);
2308		goto out;
2309	}
2310
2311	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2312					&ref_root_bh);
2313	if (ret) {
2314		mlog_errno(ret);
2315		goto out;
2316	}
2317
2318	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2319					cpos, len, meta_ac, dealloc, delete);
2320	if (ret)
2321		mlog_errno(ret);
2322out:
2323	brelse(ref_root_bh);
2324	return ret;
2325}
2326
2327/*
2328 * Mark the already-existing extent at cpos as refcounted for len clusters.
2329 * This adds the refcount extent flag.
2330 *
2331 * If the existing extent is larger than the request, initiate a
2332 * split. An attempt will be made at merging with adjacent extents.
2333 *
2334 * The caller is responsible for passing down meta_ac if we'll need it.
2335 */
2336static int ocfs2_mark_extent_refcounted(struct inode *inode,
2337				struct ocfs2_extent_tree *et,
2338				handle_t *handle, u32 cpos,
2339				u32 len, u32 phys,
2340				struct ocfs2_alloc_context *meta_ac,
2341				struct ocfs2_cached_dealloc_ctxt *dealloc)
2342{
2343	int ret;
2344
2345	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
2346					   cpos, len, phys);
2347
2348	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2349		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2350				  inode->i_ino);
2351		goto out;
2352	}
2353
2354	ret = ocfs2_change_extent_flag(handle, et, cpos,
2355				       len, phys, meta_ac, dealloc,
2356				       OCFS2_EXT_REFCOUNTED, 0);
2357	if (ret)
2358		mlog_errno(ret);
2359
2360out:
2361	return ret;
2362}
2363
2364/*
2365 * Given some contiguous physical clusters, calculate what we need
2366 * for modifying their refcount.
2367 */
2368static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2369					    struct ocfs2_caching_info *ci,
2370					    struct buffer_head *ref_root_bh,
2371					    u64 start_cpos,
2372					    u32 clusters,
2373					    int *meta_add,
2374					    int *credits)
2375{
2376	int ret = 0, index, ref_blocks = 0, recs_add = 0;
2377	u64 cpos = start_cpos;
2378	struct ocfs2_refcount_block *rb;
2379	struct ocfs2_refcount_rec rec;
2380	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2381	u32 len;
2382
2383	while (clusters) {
2384		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2385					     cpos, clusters, &rec,
2386					     &index, &ref_leaf_bh);
2387		if (ret) {
2388			mlog_errno(ret);
2389			goto out;
2390		}
2391
2392		if (ref_leaf_bh != prev_bh) {
2393			/*
2394			 * Now we encounter a new leaf block, so calculate
2395			 * whether we need to extend the old leaf.
2396			 */
2397			if (prev_bh) {
2398				rb = (struct ocfs2_refcount_block *)
2399							prev_bh->b_data;
2400
2401				if (le16_to_cpu(rb->rf_records.rl_used) +
2402				    recs_add >
2403				    le16_to_cpu(rb->rf_records.rl_count))
2404					ref_blocks++;
2405			}
2406
2407			recs_add = 0;
2408			*credits += 1;
2409			brelse(prev_bh);
2410			prev_bh = ref_leaf_bh;
2411			get_bh(prev_bh);
2412		}
2413
2414		trace_ocfs2_calc_refcount_meta_credits_iterate(
2415				recs_add, (unsigned long long)cpos, clusters,
2416				(unsigned long long)le64_to_cpu(rec.r_cpos),
2417				le32_to_cpu(rec.r_clusters),
2418				le32_to_cpu(rec.r_refcount), index);
2419
2420		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2421			  le32_to_cpu(rec.r_clusters)) - cpos;
2422		/*
2423		 * We count all the records which will be inserted into the
2424		 * same refcount block, so that we can tell exactly whether
2425		 * we need a new refcount block or not.
2426		 *
2427		 * If we insert a new one, it is easy and only happens
2428		 * while adding the refcounted flag to the extent, so we don't
2429		 * have a chance of splitting. We just need one record.
2430		 *
2431		 * If the refcount rec already exists, it is a little more
2432		 * complicated. We may have to:
2433		 * 1) split at the beginning if the start pos isn't aligned;
2434		 *    we need 1 more record in this case.
2435		 * 2) split at the end if the end pos isn't aligned;
2436		 *    we need 1 more record in this case.
2437		 * 3) split in the middle because of file system fragmentation;
2438		 *    we need 2 more records in this case (we can't detect this
2439		 *    beforehand, so always assume the worst case).
2440		 */
2441		if (rec.r_refcount) {
2442			recs_add += 2;
2443			/* Check whether we need a split at the beginning. */
2444			if (cpos == start_cpos &&
2445			    cpos != le64_to_cpu(rec.r_cpos))
2446				recs_add++;
2447
2448			/* Check whether we need a split in the end. */
2449			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2450			    le32_to_cpu(rec.r_clusters))
2451				recs_add++;
2452		} else
2453			recs_add++;
2454
2455		brelse(ref_leaf_bh);
2456		ref_leaf_bh = NULL;
2457		clusters -= len;
2458		cpos += len;
2459	}
2460
2461	if (prev_bh) {
2462		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2463
2464		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2465		    le16_to_cpu(rb->rf_records.rl_count))
2466			ref_blocks++;
2467
2468		*credits += 1;
2469	}
2470
2471	if (!ref_blocks)
2472		goto out;
2473
2474	*meta_add += ref_blocks;
2475	*credits += ref_blocks;
2476
2477	/*
2478	 * So we may need to insert ref_blocks new refcount blocks into the tree.
2479	 * That also means we need to change the b-tree and add that number
2480	 * of records, since we never merge them.
2481	 * We need one more block for expansion in case the newly created leaf
2482	 * block is also full and needs a split.
2483	 */
2484	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2485	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2486		struct ocfs2_extent_tree et;
2487
2488		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2489		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2490		*credits += ocfs2_calc_extend_credits(sb,
2491						      et.et_root_el);
2492	} else {
2493		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2494		*meta_add += 1;
2495	}
2496
2497out:
2498
2499	trace_ocfs2_calc_refcount_meta_credits(
2500		(unsigned long long)start_cpos, clusters,
2501		*meta_add, *credits);
2502	brelse(ref_leaf_bh);
2503	brelse(prev_bh);
2504	return ret;
2505}
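
/*
 * Editorial note (not from the original source): per the worst case
 * above, one existing record can cost 2 (middle split) + 1 (unaligned
 * start) + 1 (unaligned end) = 4 entries of recs_add, so a leaf whose
 * rl_used is within 4 of rl_count may be counted as needing one more
 * refcount block.
 */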
2506
2507/*
2508 * For a refcount tree, we will decrease the refcount of some
2509 * contiguous clusters, so just walk through them to see how many
2510 * blocks we are going to touch and whether we need to create new blocks.
2511 *
2512 * Normally the refcount blocks storing these refcounts should be
2513 * contiguous as well, so we can get the number easily.
2514 * We will at most split 2 refcount records and add 2 more
2515 * refcount blocks, so just check it in a rough way.
2516 *
2517 * Caller must hold refcount tree lock.
2518 */
2519int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2520					  u64 refcount_loc,
2521					  u64 phys_blkno,
2522					  u32 clusters,
2523					  int *credits,
2524					  int *ref_blocks)
2525{
2526	int ret;
2527	struct buffer_head *ref_root_bh = NULL;
2528	struct ocfs2_refcount_tree *tree;
2529	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2530
2531	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2532		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2533				  inode->i_ino);
2534		goto out;
2535	}
2536
2537	BUG_ON(!ocfs2_is_refcount_inode(inode));
2538
2539	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2540				      refcount_loc, &tree);
2541	if (ret) {
2542		mlog_errno(ret);
2543		goto out;
2544	}
2545
2546	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
2547					&ref_root_bh);
2548	if (ret) {
2549		mlog_errno(ret);
2550		goto out;
2551	}
2552
2553	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2554					       &tree->rf_ci,
2555					       ref_root_bh,
2556					       start_cpos, clusters,
2557					       ref_blocks, credits);
2558	if (ret) {
2559		mlog_errno(ret);
2560		goto out;
2561	}
2562
2563	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
2564
2565out:
2566	brelse(ref_root_bh);
2567	return ret;
2568}
2569
2570#define	MAX_CONTIG_BYTES	1048576
2571
2572static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2573{
2574	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2575}
2576
2577static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2578{
2579	return ~(ocfs2_cow_contig_clusters(sb) - 1);
2580}
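
/*
 * Editorial sketch (not from the original source), assuming a 4KB
 * cluster size:
 *
 *	ocfs2_cow_contig_clusters(sb) = 1048576 / 4096 = 256
 *	ocfs2_cow_contig_mask(sb)     = ~(256 - 1)     = 0xffffff00
 */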
2581
2582/*
2583 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2584 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2585 * while still being less than or equal to it.
2586 *
2587 * The goal is to break the extent at a multiple of contig_clusters.
2588 */
2589static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2590						 unsigned int start,
2591						 unsigned int cpos)
2592{
2593	BUG_ON(start > cpos);
2594
2595	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
2596}
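
/*
 * Editorial sketch (not from the original source), with
 * contig_clusters = 256: for start = 3 and cpos = 300,
 * (cpos - start) & mask = 297 & ~255 = 256, so the aligned start is
 * 3 + 256 = 259, the largest start + n * 256 that is still <= cpos.
 */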
2597
2598/*
2599 * Given a cluster count of len, pad it out so that it is a multiple
2600 * of contig_clusters.
2601 */
2602static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2603						  unsigned int len)
2604{
2605	unsigned int padded =
2606		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2607		ocfs2_cow_contig_mask(sb);
2608
2609	/* Did we wrap? */
2610	if (padded < len)
2611		padded = UINT_MAX;
2612
2613	return padded;
2614}
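
/*
 * Editorial sketch (not from the original source), with
 * contig_clusters = 256: len = 300 pads to (300 + 255) & ~255 = 512.
 * A len that is already a multiple of 256 comes back unchanged, and a
 * len so large that the addition wraps is clamped to UINT_MAX.
 */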
2615
2616/*
2617 * Calculate the start and number of virtual clusters we need to CoW.
2618 *
2619 * cpos is the virtual start cluster position at which we want to CoW in a
2620 * file and write_len is the cluster length.
2621 * max_cpos is the place where we want to stop the CoW intentionally.
2622 *
2623 * Normally we start the CoW from the beginning of the extent record containing cpos.
2624 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
2625 * get good I/O from the resulting extent tree.
2626 */
2627static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2628					   struct ocfs2_extent_list *el,
2629					   u32 cpos,
2630					   u32 write_len,
2631					   u32 max_cpos,
2632					   u32 *cow_start,
2633					   u32 *cow_len)
2634{
2635	int ret = 0;
2636	int tree_height = le16_to_cpu(el->l_tree_depth), i;
2637	struct buffer_head *eb_bh = NULL;
2638	struct ocfs2_extent_block *eb = NULL;
2639	struct ocfs2_extent_rec *rec;
2640	unsigned int want_clusters, rec_end = 0;
2641	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2642	int leaf_clusters;
2643
2644	BUG_ON(cpos + write_len > max_cpos);
2645
2646	if (tree_height > 0) {
2647		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2648		if (ret) {
2649			mlog_errno(ret);
2650			goto out;
2651		}
2652
2653		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2654		el = &eb->h_list;
2655
2656		if (el->l_tree_depth) {
2657			ret = ocfs2_error(inode->i_sb,
2658					  "Inode %lu has a non-zero tree depth in leaf block %llu\n",
2659					  inode->i_ino,
2660					  (unsigned long long)eb_bh->b_blocknr);
2661			goto out;
2662		}
2663	}
2664
2665	*cow_len = 0;
2666	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2667		rec = &el->l_recs[i];
2668
2669		if (ocfs2_is_empty_extent(rec)) {
2670			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
2671					"index %d\n", inode->i_ino, i);
2672			continue;
2673		}
2674
2675		if (le32_to_cpu(rec->e_cpos) +
2676		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2677			continue;
2678
2679		if (*cow_len == 0) {
2680			/*
2681			 * We should find a refcounted record in the
2682			 * first pass.
2683			 */
2684			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2685			*cow_start = le32_to_cpu(rec->e_cpos);
2686		}
2687
2688		 * If we encounter a hole or a non-refcounted record, or
2689		 * go past max_cpos, stop the search.
2690		 * pass the max_cpos, stop the search.
2691		 */
2692		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2693		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
2694		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
2695			break;
2696
2697		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2698		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2699		if (rec_end > max_cpos) {
2700			rec_end = max_cpos;
2701			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
2702		}
2703
2704		/*
2705		 * How many clusters do we actually need from
2706		 * this extent?  First we see how many we actually
2707		 * need to complete the write.  If that's smaller
2708		 * than contig_clusters, we try for contig_clusters.
2709		 */
2710		if (!*cow_len)
2711			want_clusters = write_len;
2712		else
2713			want_clusters = (cpos + write_len) -
2714				(*cow_start + *cow_len);
2715		if (want_clusters < contig_clusters)
2716			want_clusters = contig_clusters;
2717
2718		/*
2719		 * If the write does not cover the whole extent, we
2720		 * need to calculate how we're going to split the extent.
2721		 * We try to do it on contig_clusters boundaries.
2722		 *
2723		 * Any extent smaller than contig_clusters will be
2724		 * CoWed in its entirety.
2725		 */
2726		if (leaf_clusters <= contig_clusters)
2727			*cow_len += leaf_clusters;
2728		else if (*cow_len || (*cow_start == cpos)) {
2729			/*
2730			 * This extent needs to be CoW'd from its
2731			 * beginning, so all we have to do is compute
2732			 * how many clusters to grab.  We align
2733			 * want_clusters to the edge of contig_clusters
2734			 * to get better I/O.
2735			 */
2736			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2737							       want_clusters);
2738
2739			if (leaf_clusters < want_clusters)
2740				*cow_len += leaf_clusters;
2741			else
2742				*cow_len += want_clusters;
2743		} else if ((*cow_start + contig_clusters) >=
2744			   (cpos + write_len)) {
2745			/*
2746			 * Breaking off contig_clusters at the front
2747			 * of the extent will cover our write.  That's
2748			 * easy.
2749			 */
2750			*cow_len = contig_clusters;
2751		} else if ((rec_end - cpos) <= contig_clusters) {
2752			/*
2753			 * Breaking off contig_clusters at the tail of
2754			 * this extent will cover cpos.
2755			 */
2756			*cow_start = rec_end - contig_clusters;
2757			*cow_len = contig_clusters;
2758		} else if ((rec_end - cpos) <= want_clusters) {
2759			/*
2760			 * While we can't fit the entire write in this
2761			 * extent, we know that the write goes from cpos
2762			 * to the end of the extent.  Break that off.
2763			 * We try to break it at some multiple of
2764			 * contig_clusters from the front of the extent.
2765			 * Failing that (ie, cpos is within
2766			 * contig_clusters of the front), we'll CoW the
2767			 * entire extent.
2768			 */
2769			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2770							   *cow_start, cpos);
2771			*cow_len = rec_end - *cow_start;
2772		} else {
2773			/*
2774			 * Ok, the entire write lives in the middle of
2775			 * this extent.  Let's try to slice the extent up
2776			 * nicely.  Optimally, our CoW region starts at
2777			 * m*contig_clusters from the beginning of the
2778			 * extent and goes for n*contig_clusters,
2779			 * covering the entire write.
2780			 */
2781			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2782							   *cow_start, cpos);
2783
2784			want_clusters = (cpos + write_len) - *cow_start;
2785			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2786							       want_clusters);
2787			if (*cow_start + want_clusters <= rec_end)
2788				*cow_len = want_clusters;
2789			else
2790				*cow_len = rec_end - *cow_start;
2791		}
2792
2793		/* Have we covered our entire write yet? */
2794		if ((*cow_start + *cow_len) >= (cpos + write_len))
2795			break;
2796
2797		/*
2798		 * If we reach the end of the extent block and don't get enough
2799		 * clusters, continue with the next extent block if possible.
2800		 */
2801		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2802		    eb && eb->h_next_leaf_blk) {
2803			brelse(eb_bh);
2804			eb_bh = NULL;
2805
2806			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2807					       le64_to_cpu(eb->h_next_leaf_blk),
2808					       &eb_bh);
2809			if (ret) {
2810				mlog_errno(ret);
2811				goto out;
2812			}
2813
2814			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2815			el = &eb->h_list;
2816			i = -1;
2817		}
2818	}
2819
2820out:
2821	brelse(eb_bh);
2822	return ret;
2823}
2824
2825/*
2826 * Prepare meta_ac and data_ac and calculate credits when we want to add
2827 * num_clusters clusters to the data tree "et" and change the refcount for
2828 * the old clusters (starting from p_cluster) in the refcount tree.
2829 *
2830 * Note:
2831 * 1. Since we may split the old tree, we will need at most num_clusters + 2
2832 *    new leaf records.
2833 * 2. In some cases we do not need to reserve new clusters (e.g. reflink), so
2834 *    just pass data_ac = NULL.
2835 */
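/*
 * Typical call pattern (a sketch mirroring ocfs2_make_clusters_writable()
 * below; error handling elided):
 *
 *	int credits = 0;
 *
 *	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
 *					     &et, ref_ci, ref_root_bh,
 *					     &meta_ac, &data_ac, &credits);
 *	...
 *	handle = ocfs2_start_trans(osb, credits);
 */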
2836static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2837					u32 p_cluster, u32 num_clusters,
2838					struct ocfs2_extent_tree *et,
2839					struct ocfs2_caching_info *ref_ci,
2840					struct buffer_head *ref_root_bh,
2841					struct ocfs2_alloc_context **meta_ac,
2842					struct ocfs2_alloc_context **data_ac,
2843					int *credits)
2844{
2845	int ret = 0, meta_add = 0;
2846	int num_free_extents = ocfs2_num_free_extents(et);
2847
2848	if (num_free_extents < 0) {
2849		ret = num_free_extents;
2850		mlog_errno(ret);
2851		goto out;
2852	}
2853
2854	if (num_free_extents < num_clusters + 2)
2855		meta_add =
2856			ocfs2_extend_meta_needed(et->et_root_el);
2857
2858	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
2859
2860	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2861					       p_cluster, num_clusters,
2862					       &meta_add, credits);
2863	if (ret) {
2864		mlog_errno(ret);
2865		goto out;
2866	}
2867
2868	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
2869	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2870						meta_ac);
2871	if (ret) {
2872		mlog_errno(ret);
2873		goto out;
2874	}
2875
2876	if (data_ac) {
2877		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2878					     data_ac);
2879		if (ret)
2880			mlog_errno(ret);
2881	}
2882
2883out:
2884	if (ret) {
2885		if (*meta_ac) {
2886			ocfs2_free_alloc_context(*meta_ac);
2887			*meta_ac = NULL;
2888		}
2889	}
2890
2891	return ret;
2892}
2893
2894static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2895{
2896	BUG_ON(buffer_dirty(bh));
2897
2898	clear_buffer_mapped(bh);
2899
2900	return 0;
2901}
2902
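/*
 * Duplicate the data of old_cluster into new_cluster through the page
 * cache.  This is the duplication hook used when CoWing regular file
 * data; the xattr path uses ocfs2_duplicate_clusters_by_jbd() below,
 * which copies through journalled buffer heads instead.
 */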
2903int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2904				     struct inode *inode,
2905				     u32 cpos, u32 old_cluster,
2906				     u32 new_cluster, u32 new_len)
2907{
2908	int ret = 0, partial;
2909	struct super_block *sb = inode->i_sb;
2910	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2911	struct page *page;
2912	pgoff_t page_index;
2913	unsigned int from, to;
2914	loff_t offset, end, map_end;
2915	struct address_space *mapping = inode->i_mapping;
2916
2917	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2918					       new_cluster, new_len);
2919
2920	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2921	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
2922	/*
2923	 * We only duplicate pages until we reach the page that contains
2924	 * i_size - 1.  So trim 'end' to i_size.
2925	 */
2926	if (end > i_size_read(inode))
2927		end = i_size_read(inode);
2928
2929	while (offset < end) {
2930		page_index = offset >> PAGE_SHIFT;
2931		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
2932		if (map_end > end)
2933			map_end = end;
2934
2935		/* from and to are offsets within the page. */
2936		from = offset & (PAGE_SIZE - 1);
2937		to = PAGE_SIZE;
2938		if (map_end & (PAGE_SIZE - 1))
2939			to = map_end & (PAGE_SIZE - 1);
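		/*
		 * For example, with 4KB pages an offset of 6144 and a
		 * map_end of 8192 give from = 2048 and to = 4096, i.e.
		 * the second half of page index 1.
		 */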
2940
2941retry:
2942		page = find_or_create_page(mapping, page_index, GFP_NOFS);
2943		if (!page) {
2944			ret = -ENOMEM;
2945			mlog_errno(ret);
2946			break;
2947		}
2948
2949		/*
2950		 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
2951		 * page; if we find one, write it back before duplicating.
2952		 */
2953		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
2954			if (PageDirty(page)) {
2955				/*
2956				 * write_one_page() will unlock the page on return
2957				 */
2958				ret = write_one_page(page);
2959				goto retry;
2960			}
2961		}
2962
2963		if (!PageUptodate(page)) {
2964			ret = block_read_full_page(page, ocfs2_get_block);
2965			if (ret) {
2966				mlog_errno(ret);
2967				goto unlock;
2968			}
2969			lock_page(page);
2970		}
2971
2972		if (page_has_buffers(page)) {
2973			ret = walk_page_buffers(handle, page_buffers(page),
2974						from, to, &partial,
2975						ocfs2_clear_cow_buffer);
2976			if (ret) {
2977				mlog_errno(ret);
2978				goto unlock;
2979			}
2980		}
2981
2982		ocfs2_map_and_dirty_page(inode,
2983					 handle, from, to,
2984					 page, 0, &new_block);
2985		mark_page_accessed(page);
2986unlock:
2987		unlock_page(page);
2988		put_page(page);
2989		page = NULL;
2990		offset = map_end;
2991		if (ret)
2992			break;
2993	}
2994
2995	return ret;
2996}
2997
2998int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
2999				    struct inode *inode,
3000				    u32 cpos, u32 old_cluster,
3001				    u32 new_cluster, u32 new_len)
3002{
3003	int ret = 0;
3004	struct super_block *sb = inode->i_sb;
3005	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3006	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
3007	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
3008	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
3009	struct ocfs2_super *osb = OCFS2_SB(sb);
3010	struct buffer_head *old_bh = NULL;
3011	struct buffer_head *new_bh = NULL;
3012
3013	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3014					       new_cluster, new_len);
3015
3016	for (i = 0; i < blocks; i++, old_block++, new_block++) {
3017		new_bh = sb_getblk(osb->sb, new_block);
3018		if (new_bh == NULL) {
3019			ret = -ENOMEM;
3020			mlog_errno(ret);
3021			break;
3022		}
3023
3024		ocfs2_set_new_buffer_uptodate(ci, new_bh);
3025
3026		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
3027		if (ret) {
3028			mlog_errno(ret);
3029			break;
3030		}
3031
3032		ret = ocfs2_journal_access(handle, ci, new_bh,
3033					   OCFS2_JOURNAL_ACCESS_CREATE);
3034		if (ret) {
3035			mlog_errno(ret);
3036			break;
3037		}
3038
3039		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
3040		ocfs2_journal_dirty(handle, new_bh);
3041
3042		brelse(new_bh);
3043		brelse(old_bh);
3044		new_bh = NULL;
3045		old_bh = NULL;
3046	}
3047
3048	brelse(new_bh);
3049	brelse(old_bh);
3050	return ret;
3051}
3052
3053static int ocfs2_clear_ext_refcount(handle_t *handle,
3054				    struct ocfs2_extent_tree *et,
3055				    u32 cpos, u32 p_cluster, u32 len,
3056				    unsigned int ext_flags,
3057				    struct ocfs2_alloc_context *meta_ac,
3058				    struct ocfs2_cached_dealloc_ctxt *dealloc)
3059{
3060	int ret, index;
3061	struct ocfs2_extent_rec replace_rec;
3062	struct ocfs2_path *path = NULL;
3063	struct ocfs2_extent_list *el;
3064	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
3065	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
3066
3067	trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
3068				       cpos, len, p_cluster, ext_flags);
3069
3070	memset(&replace_rec, 0, sizeof(replace_rec));
3071	replace_rec.e_cpos = cpu_to_le32(cpos);
3072	replace_rec.e_leaf_clusters = cpu_to_le16(len);
3073	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
3074								   p_cluster));
3075	replace_rec.e_flags = ext_flags;
3076	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
3077
3078	path = ocfs2_new_path_from_et(et);
3079	if (!path) {
3080		ret = -ENOMEM;
3081		mlog_errno(ret);
3082		goto out;
3083	}
3084
3085	ret = ocfs2_find_path(et->et_ci, path, cpos);
3086	if (ret) {
3087		mlog_errno(ret);
3088		goto out;
3089	}
3090
3091	el = path_leaf_el(path);
3092
3093	index = ocfs2_search_extent_list(el, cpos);
3094	if (index == -1) {
3095		ret = ocfs2_error(sb,
3096				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
3097				  (unsigned long long)ino, cpos);
3098		goto out;
3099	}
3100
3101	ret = ocfs2_split_extent(handle, et, path, index,
3102				 &replace_rec, meta_ac, dealloc);
3103	if (ret)
3104		mlog_errno(ret);
3105
3106out:
3107	ocfs2_free_path(path);
3108	return ret;
3109}
3110
3111static int ocfs2_replace_clusters(handle_t *handle,
3112				  struct ocfs2_cow_context *context,
3113				  u32 cpos, u32 old,
3114				  u32 new, u32 len,
3115				  unsigned int ext_flags)
3116{
3117	int ret;
3118	struct ocfs2_caching_info *ci = context->data_et.et_ci;
3119	u64 ino = ocfs2_metadata_cache_owner(ci);
3120
3121	trace_ocfs2_replace_clusters((unsigned long long)ino,
3122				     cpos, old, new, len, ext_flags);
3123
3124	/* If the old clusters are unwritten, there is no need to duplicate. */
3125	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3126		ret = context->cow_duplicate_clusters(handle, context->inode,
3127						      cpos, old, new, len);
3128		if (ret) {
3129			mlog_errno(ret);
3130			goto out;
3131		}
3132	}
3133
3134	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
3135				       cpos, new, len, ext_flags,
3136				       context->meta_ac, &context->dealloc);
3137	if (ret)
3138		mlog_errno(ret);
3139out:
3140	return ret;
3141}
3142
3143int ocfs2_cow_sync_writeback(struct super_block *sb,
3144			     struct inode *inode,
3145			     u32 cpos, u32 num_clusters)
3146{
3147	int ret = 0;
3148	loff_t offset, end, map_end;
3149	pgoff_t page_index;
3150	struct page *page;
3151
3152	if (ocfs2_should_order_data(inode))
3153		return 0;
3154
3155	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3156	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
3157
3158	ret = filemap_fdatawrite_range(inode->i_mapping,
3159				       offset, end - 1);
3160	if (ret < 0) {
3161		mlog_errno(ret);
3162		return ret;
3163	}
3164
3165	while (offset < end) {
3166		page_index = offset >> PAGE_SHIFT;
3167		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
3168		if (map_end > end)
3169			map_end = end;
3170
3171		page = find_or_create_page(inode->i_mapping,
3172					   page_index, GFP_NOFS);
3173		BUG_ON(!page);
3174
3175		wait_on_page_writeback(page);
3176		if (PageError(page)) {
3177			ret = -EIO;
3178			mlog_errno(ret);
3179		} else
3180			mark_page_accessed(page);
3181
3182		unlock_page(page);
3183		put_page(page);
3184		page = NULL;
3185		offset = map_end;
3186		if (ret)
3187			break;
3188	}
3189
3190	return ret;
3191}
3192
3193static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
3194				 u32 v_cluster, u32 *p_cluster,
3195				 u32 *num_clusters,
3196				 unsigned int *extent_flags)
3197{
3198	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
3199				  num_clusters, extent_flags);
3200}
3201
3202static int ocfs2_make_clusters_writable(struct super_block *sb,
3203					struct ocfs2_cow_context *context,
3204					u32 cpos, u32 p_cluster,
3205					u32 num_clusters, unsigned int e_flags)
3206{
3207	int ret, delete, index, credits =  0;
3208	u32 new_bit, new_len, orig_num_clusters;
3209	unsigned int set_len;
3210	struct ocfs2_super *osb = OCFS2_SB(sb);
3211	handle_t *handle;
3212	struct buffer_head *ref_leaf_bh = NULL;
3213	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3214	struct ocfs2_refcount_rec rec;
3215
3216	trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3217					   num_clusters, e_flags);
3218
3219	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3220					     &context->data_et,
3221					     ref_ci,
3222					     context->ref_root_bh,
3223					     &context->meta_ac,
3224					     &context->data_ac, &credits);
3225	if (ret) {
3226		mlog_errno(ret);
3227		return ret;
3228	}
3229
3230	if (context->post_refcount)
3231		credits += context->post_refcount->credits;
3232
3233	credits += context->extra_credits;
3234	handle = ocfs2_start_trans(osb, credits);
3235	if (IS_ERR(handle)) {
3236		ret = PTR_ERR(handle);
3237		mlog_errno(ret);
3238		goto out;
3239	}
3240
3241	orig_num_clusters = num_clusters;
3242
3243	while (num_clusters) {
3244		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3245					     p_cluster, num_clusters,
3246					     &rec, &index, &ref_leaf_bh);
3247		if (ret) {
3248			mlog_errno(ret);
3249			goto out_commit;
3250		}
3251
3252		BUG_ON(!rec.r_refcount);
3253		set_len = min((u64)p_cluster + num_clusters,
3254			      le64_to_cpu(rec.r_cpos) +
3255			      le32_to_cpu(rec.r_clusters)) - p_cluster;
3256
3257		/*
3258		 * There are two different situations here.
3259		 * 1. If refcount == 1, remove the flag and don't COW.
3260		 * 2. If refcount > 1, allocate clusters.
3261		 *    We may not be able to allocate r_len clusters at once,
3262		 *    so continue until we reach num_clusters.
3263		 */
3264		if (le32_to_cpu(rec.r_refcount) == 1) {
3265			delete = 0;
3266			ret = ocfs2_clear_ext_refcount(handle,
3267						       &context->data_et,
3268						       cpos, p_cluster,
3269						       set_len, e_flags,
3270						       context->meta_ac,
3271						       &context->dealloc);
3272			if (ret) {
3273				mlog_errno(ret);
3274				goto out_commit;
3275			}
3276		} else {
3277			delete = 1;
3278
3279			ret = __ocfs2_claim_clusters(handle,
3280						     context->data_ac,
3281						     1, set_len,
3282						     &new_bit, &new_len);
3283			if (ret) {
3284				mlog_errno(ret);
3285				goto out_commit;
3286			}
3287
3288			ret = ocfs2_replace_clusters(handle, context,
3289						     cpos, p_cluster, new_bit,
3290						     new_len, e_flags);
3291			if (ret) {
3292				mlog_errno(ret);
3293				goto out_commit;
3294			}
3295			set_len = new_len;
3296		}
3297
3298		ret = __ocfs2_decrease_refcount(handle, ref_ci,
3299						context->ref_root_bh,
3300						p_cluster, set_len,
3301						context->meta_ac,
3302						&context->dealloc, delete);
3303		if (ret) {
3304			mlog_errno(ret);
3305			goto out_commit;
3306		}
3307
3308		cpos += set_len;
3309		p_cluster += set_len;
3310		num_clusters -= set_len;
3311		brelse(ref_leaf_bh);
3312		ref_leaf_bh = NULL;
3313	}
3314
3315	/* handle any post_cow action. */
3316	if (context->post_refcount && context->post_refcount->func) {
3317		ret = context->post_refcount->func(context->inode, handle,
3318						context->post_refcount->para);
3319		if (ret) {
3320			mlog_errno(ret);
3321			goto out_commit;
3322		}
3323	}
3324
3325	/*
3326	 * Here we should write the new page out first if we are
3327	 * in write-back mode.
3328	 */
3329	if (context->get_clusters == ocfs2_di_get_clusters) {
3330		ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3331					       orig_num_clusters);
3332		if (ret)
3333			mlog_errno(ret);
3334	}
3335
3336out_commit:
3337	ocfs2_commit_trans(osb, handle);
3338
3339out:
3340	if (context->data_ac) {
3341		ocfs2_free_alloc_context(context->data_ac);
3342		context->data_ac = NULL;
3343	}
3344	if (context->meta_ac) {
3345		ocfs2_free_alloc_context(context->meta_ac);
3346		context->meta_ac = NULL;
3347	}
3348	brelse(ref_leaf_bh);
3349
3350	return ret;
3351}
3352
3353static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3354{
3355	int ret = 0;
3356	struct inode *inode = context->inode;
3357	u32 cow_start = context->cow_start, cow_len = context->cow_len;
3358	u32 p_cluster, num_clusters;
3359	unsigned int ext_flags;
3360	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3361
3362	if (!ocfs2_refcount_tree(osb)) {
3363		return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
3364				   inode->i_ino);
3365	}
3366
3367	ocfs2_init_dealloc_ctxt(&context->dealloc);
3368
3369	while (cow_len) {
3370		ret = context->get_clusters(context, cow_start, &p_cluster,
3371					    &num_clusters, &ext_flags);
3372		if (ret) {
3373			mlog_errno(ret);
3374			break;
3375		}
3376
3377		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3378
3379		if (cow_len < num_clusters)
3380			num_clusters = cow_len;
3381
3382		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3383						   cow_start, p_cluster,
3384						   num_clusters, ext_flags);
3385		if (ret) {
3386			mlog_errno(ret);
3387			break;
3388		}
3389
3390		cow_len -= num_clusters;
3391		cow_start += num_clusters;
3392	}
3393
3394	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3395		ocfs2_schedule_truncate_log_flush(osb, 1);
3396		ocfs2_run_deallocs(osb, &context->dealloc);
3397	}
3398
3399	return ret;
3400}
3401
3402/*
3403 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
3404 * past max_cpos.  This will stop when it runs into a hole or an
3405 * unrefcounted extent.
3406 */
3407static int ocfs2_refcount_cow_hunk(struct inode *inode,
3408				   struct buffer_head *di_bh,
3409				   u32 cpos, u32 write_len, u32 max_cpos)
3410{
3411	int ret;
3412	u32 cow_start = 0, cow_len = 0;
3413	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3414	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3415	struct buffer_head *ref_root_bh = NULL;
3416	struct ocfs2_refcount_tree *ref_tree;
3417	struct ocfs2_cow_context *context = NULL;
3418
3419	BUG_ON(!ocfs2_is_refcount_inode(inode));
3420
3421	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
3422					      cpos, write_len, max_cpos,
3423					      &cow_start, &cow_len);
3424	if (ret) {
3425		mlog_errno(ret);
3426		goto out;
3427	}
3428
3429	trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
3430				      cpos, write_len, max_cpos,
3431				      cow_start, cow_len);
3432
3433	BUG_ON(cow_len == 0);
3434
3435	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3436	if (!context) {
3437		ret = -ENOMEM;
3438		mlog_errno(ret);
3439		goto out;
3440	}
3441
3442	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3443				       1, &ref_tree, &ref_root_bh);
3444	if (ret) {
3445		mlog_errno(ret);
3446		goto out;
3447	}
3448
3449	context->inode = inode;
3450	context->cow_start = cow_start;
3451	context->cow_len = cow_len;
3452	context->ref_tree = ref_tree;
3453	context->ref_root_bh = ref_root_bh;
3454	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3455	context->get_clusters = ocfs2_di_get_clusters;
3456
3457	ocfs2_init_dinode_extent_tree(&context->data_et,
3458				      INODE_CACHE(inode), di_bh);
3459
3460	ret = ocfs2_replace_cow(context);
3461	if (ret)
3462		mlog_errno(ret);
3463
3464	/*
3465	 * Truncate the extent map here: whether or not we hit an error
3466	 * during the operation, we can no longer trust the cached extent
3467	 * map.
3468	 */
3469	ocfs2_extent_map_trunc(inode, cow_start);
3470
3471	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3472	brelse(ref_root_bh);
3473out:
3474	kfree(context);
3475	return ret;
3476}
3477
3478/*
3479 * CoW any and all clusters between cpos and cpos+write_len.
3480 * Don't CoW past max_cpos.  If this returns successfully, all
3481 * clusters between cpos and cpos+write_len are safe to modify.
3482 */
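/*
 * A minimal caller sketch (illustrative; real callers such as the
 * write path also take the appropriate inode and rw locks first):
 *
 *	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 *	if (ret)
 *		mlog_errno(ret);
 */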
3483int ocfs2_refcount_cow(struct inode *inode,
3484		       struct buffer_head *di_bh,
3485		       u32 cpos, u32 write_len, u32 max_cpos)
3486{
3487	int ret = 0;
3488	u32 p_cluster, num_clusters;
3489	unsigned int ext_flags;
3490
3491	while (write_len) {
3492		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3493					 &num_clusters, &ext_flags);
3494		if (ret) {
3495			mlog_errno(ret);
3496			break;
3497		}
3498
3499		if (write_len < num_clusters)
3500			num_clusters = write_len;
3501
3502		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3503			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3504						      num_clusters, max_cpos);
3505			if (ret) {
3506				mlog_errno(ret);
3507				break;
3508			}
3509		}
3510
3511		write_len -= num_clusters;
3512		cpos += num_clusters;
3513	}
3514
3515	return ret;
3516}
3517
3518static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
3519					  u32 v_cluster, u32 *p_cluster,
3520					  u32 *num_clusters,
3521					  unsigned int *extent_flags)
3522{
3523	struct inode *inode = context->inode;
3524	struct ocfs2_xattr_value_root *xv = context->cow_object;
3525
3526	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
3527					num_clusters, &xv->xr_list,
3528					extent_flags);
3529}
3530
3531/*
3532 * Given an xattr value root, calculate the most metadata blocks and
3533 * credits we need for the refcount tree change if we truncate it to 0.
3534 */
3535int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
3536				       struct ocfs2_caching_info *ref_ci,
3537				       struct buffer_head *ref_root_bh,
3538				       struct ocfs2_xattr_value_root *xv,
3539				       int *meta_add, int *credits)
3540{
3541	int ret = 0, index, ref_blocks = 0;
3542	u32 p_cluster, num_clusters;
3543	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
3544	struct ocfs2_refcount_block *rb;
3545	struct ocfs2_refcount_rec rec;
3546	struct buffer_head *ref_leaf_bh = NULL;
3547
3548	while (cpos < clusters) {
3549		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
3550					       &num_clusters, &xv->xr_list,
3551					       NULL);
3552		if (ret) {
3553			mlog_errno(ret);
3554			goto out;
3555		}
3556
3557		cpos += num_clusters;
3558
3559		while (num_clusters) {
3560			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
3561						     p_cluster, num_clusters,
3562						     &rec, &index,
3563						     &ref_leaf_bh);
3564			if (ret) {
3565				mlog_errno(ret);
3566				goto out;
3567			}
3568
3569			BUG_ON(!rec.r_refcount);
3570
3571			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
3572
3573			/*
3574			 * We really don't know whether the other clusters are in
3575			 * this refcount block or not, so take the worst case:
3576			 * all the clusters are in this block and each one will
3577			 * split a refcount rec, so in total we need
3578			 * clusters * 2 new refcount recs.
3579			 */
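			/*
			 * For example (hypothetical numbers): with
			 * rl_used = 100, rl_count = 126 and clusters = 20,
			 * 100 + 40 > 126, so we count one more block.
			 */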
3580			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
3581			    le16_to_cpu(rb->rf_records.rl_count))
3582				ref_blocks++;
3583
3584			*credits += 1;
3585			brelse(ref_leaf_bh);
3586			ref_leaf_bh = NULL;
3587
3588			if (num_clusters <= le32_to_cpu(rec.r_clusters))
3589				break;
3590			else
3591				num_clusters -= le32_to_cpu(rec.r_clusters);
3592			p_cluster += num_clusters;
3593		}
3594	}
3595
3596	*meta_add += ref_blocks;
3597	if (!ref_blocks)
3598		goto out;
3599
3600	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
3601	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
3602		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
3603	else {
3604		struct ocfs2_extent_tree et;
3605
3606		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
3607		*credits += ocfs2_calc_extend_credits(inode->i_sb,
3608						      et.et_root_el);
3609	}
3610
3611out:
3612	brelse(ref_leaf_bh);
3613	return ret;
3614}
3615
3616/*
3617 * Do CoW for xattr.
3618 */
3619int ocfs2_refcount_cow_xattr(struct inode *inode,
3620			     struct ocfs2_dinode *di,
3621			     struct ocfs2_xattr_value_buf *vb,
3622			     struct ocfs2_refcount_tree *ref_tree,
3623			     struct buffer_head *ref_root_bh,
3624			     u32 cpos, u32 write_len,
3625			     struct ocfs2_post_refcount *post)
3626{
3627	int ret;
3628	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
3629	struct ocfs2_cow_context *context = NULL;
3630	u32 cow_start, cow_len;
3631
3632	BUG_ON(!ocfs2_is_refcount_inode(inode));
3633
3634	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
3635					      cpos, write_len, UINT_MAX,
3636					      &cow_start, &cow_len);
3637	if (ret) {
3638		mlog_errno(ret);
3639		goto out;
3640	}
3641
3642	BUG_ON(cow_len == 0);
3643
3644	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3645	if (!context) {
3646		ret = -ENOMEM;
3647		mlog_errno(ret);
3648		goto out;
3649	}
3650
3651	context->inode = inode;
3652	context->cow_start = cow_start;
3653	context->cow_len = cow_len;
3654	context->ref_tree = ref_tree;
3655	context->ref_root_bh = ref_root_bh;
3656	context->cow_object = xv;
3657
3658	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
3659	/* We need the extra credits for duplicate_clusters by jbd. */
3660	context->extra_credits =
3661		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
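	/*
	 * For example, with 4KB blocks and 32KB clusters this reserves
	 * 8 extra credits per CoWed cluster, one for each block that
	 * ocfs2_duplicate_clusters_by_jbd() dirties.
	 */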
3662	context->get_clusters = ocfs2_xattr_value_get_clusters;
3663	context->post_refcount = post;
3664
3665	ocfs2_init_xattr_value_extent_tree(&context->data_et,
3666					   INODE_CACHE(inode), vb);
3667
3668	ret = ocfs2_replace_cow(context);
3669	if (ret)
3670		mlog_errno(ret);
3671
3672out:
3673	kfree(context);
3674	return ret;
3675}
3676
3677/*
3678 * Insert a new extent into the refcount tree and mark an extent rec
3679 * as refcounted in the dinode tree.
3680 */
3681int ocfs2_add_refcount_flag(struct inode *inode,
3682			    struct ocfs2_extent_tree *data_et,
3683			    struct ocfs2_caching_info *ref_ci,
3684			    struct buffer_head *ref_root_bh,
3685			    u32 cpos, u32 p_cluster, u32 num_clusters,
3686			    struct ocfs2_cached_dealloc_ctxt *dealloc,
3687			    struct ocfs2_post_refcount *post)
3688{
3689	int ret;
3690	handle_t *handle;
3691	int credits = 1, ref_blocks = 0;
3692	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3693	struct ocfs2_alloc_context *meta_ac = NULL;
3694
3695	/* We need to be able to handle at least an extent tree split. */
3696	ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el);
3697
3698	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
3699					       ref_ci, ref_root_bh,
3700					       p_cluster, num_clusters,
3701					       &ref_blocks, &credits);
3702	if (ret) {
3703		mlog_errno(ret);
3704		goto out;
3705	}
3706
3707	trace_ocfs2_add_refcount_flag(ref_blocks, credits);
3708
3709	if (ref_blocks) {
3710		ret = ocfs2_reserve_new_metadata_blocks(osb,
3711							ref_blocks, &meta_ac);
3712		if (ret) {
3713			mlog_errno(ret);
3714			goto out;
3715		}
3716	}
3717
3718	if (post)
3719		credits += post->credits;
3720
3721	handle = ocfs2_start_trans(osb, credits);
3722	if (IS_ERR(handle)) {
3723		ret = PTR_ERR(handle);
3724		mlog_errno(ret);
3725		goto out;
3726	}
3727
3728	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
3729					   cpos, num_clusters, p_cluster,
3730					   meta_ac, dealloc);
3731	if (ret) {
3732		mlog_errno(ret);
3733		goto out_commit;
3734	}
3735
3736	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3737					p_cluster, num_clusters, 0,
3738					meta_ac, dealloc);
3739	if (ret) {
3740		mlog_errno(ret);
3741		goto out_commit;
3742	}
3743
3744	if (post && post->func) {
3745		ret = post->func(inode, handle, post->para);
3746		if (ret)
3747			mlog_errno(ret);
3748	}
3749
3750out_commit:
3751	ocfs2_commit_trans(osb, handle);
3752out:
3753	if (meta_ac)
3754		ocfs2_free_alloc_context(meta_ac);
3755	return ret;
3756}
3757
3758static int ocfs2_change_ctime(struct inode *inode,
3759			      struct buffer_head *di_bh)
3760{
3761	int ret;
3762	handle_t *handle;
3763	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3764
3765	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
3766				   OCFS2_INODE_UPDATE_CREDITS);
3767	if (IS_ERR(handle)) {
3768		ret = PTR_ERR(handle);
3769		mlog_errno(ret);
3770		goto out;
3771	}
3772
3773	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
3774				      OCFS2_JOURNAL_ACCESS_WRITE);
3775	if (ret) {
3776		mlog_errno(ret);
3777		goto out_commit;
3778	}
3779
3780	inode->i_ctime = current_time(inode);
3781	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3782	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3783
3784	ocfs2_journal_dirty(handle, di_bh);
3785
3786out_commit:
3787	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
3788out:
3789	return ret;
3790}
3791
3792static int ocfs2_attach_refcount_tree(struct inode *inode,
3793				      struct buffer_head *di_bh)
3794{
3795	int ret, data_changed = 0;
3796	struct buffer_head *ref_root_bh = NULL;
3797	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3798	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3799	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3800	struct ocfs2_refcount_tree *ref_tree;
3801	unsigned int ext_flags;
3802	loff_t size;
3803	u32 cpos, num_clusters, clusters, p_cluster;
3804	struct ocfs2_cached_dealloc_ctxt dealloc;
3805	struct ocfs2_extent_tree di_et;
3806
3807	ocfs2_init_dealloc_ctxt(&dealloc);
3808
3809	if (!ocfs2_is_refcount_inode(inode)) {
3810		ret = ocfs2_create_refcount_tree(inode, di_bh);
3811		if (ret) {
3812			mlog_errno(ret);
3813			goto out;
3814		}
3815	}
3816
3817	BUG_ON(!di->i_refcount_loc);
3818	ret = ocfs2_lock_refcount_tree(osb,
3819				       le64_to_cpu(di->i_refcount_loc), 1,
3820				       &ref_tree, &ref_root_bh);
3821	if (ret) {
3822		mlog_errno(ret);
3823		goto out;
3824	}
3825
3826	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3827		goto attach_xattr;
3828
3829	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3830
3831	size = i_size_read(inode);
3832	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
3833
3834	cpos = 0;
3835	while (cpos < clusters) {
3836		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3837					 &num_clusters, &ext_flags);
3838		if (ret) {
3839			mlog_errno(ret);
3840			goto unlock;
3841		}
3842		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
3843			ret = ocfs2_add_refcount_flag(inode, &di_et,
3844						      &ref_tree->rf_ci,
3845						      ref_root_bh, cpos,
3846						      p_cluster, num_clusters,
3847						      &dealloc, NULL);
3848			if (ret) {
3849				mlog_errno(ret);
3850				goto unlock;
3851			}
3852
3853			data_changed = 1;
3854		}
3855		cpos += num_clusters;
3856	}
3857
3858attach_xattr:
3859	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3860		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3861						       &ref_tree->rf_ci,
3862						       ref_root_bh,
3863						       &dealloc);
3864		if (ret) {
3865			mlog_errno(ret);
3866			goto unlock;
3867		}
3868	}
3869
3870	if (data_changed) {
3871		ret = ocfs2_change_ctime(inode, di_bh);
3872		if (ret)
3873			mlog_errno(ret);
3874	}
3875
3876unlock:
3877	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3878	brelse(ref_root_bh);
3879
3880	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
3881		ocfs2_schedule_truncate_log_flush(osb, 1);
3882		ocfs2_run_deallocs(osb, &dealloc);
3883	}
3884out:
3885	/*
3886	 * Empty the extent map so that we may get the right extent
3887	 * record from the disk.
3888	 */
3889	ocfs2_extent_map_trunc(inode, 0);
3890
3891	return ret;
3892}
3893
3894static int ocfs2_add_refcounted_extent(struct inode *inode,
3895				   struct ocfs2_extent_tree *et,
3896				   struct ocfs2_caching_info *ref_ci,
3897				   struct buffer_head *ref_root_bh,
3898				   u32 cpos, u32 p_cluster, u32 num_clusters,
3899				   unsigned int ext_flags,
3900				   struct ocfs2_cached_dealloc_ctxt *dealloc)
3901{
3902	int ret;
3903	handle_t *handle;
3904	int credits = 0;
3905	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3906	struct ocfs2_alloc_context *meta_ac = NULL;
3907
3908	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
3909					     p_cluster, num_clusters,
3910					     et, ref_ci,
3911					     ref_root_bh, &meta_ac,
3912					     NULL, &credits);
3913	if (ret) {
3914		mlog_errno(ret);
3915		goto out;
3916	}
3917
3918	handle = ocfs2_start_trans(osb, credits);
3919	if (IS_ERR(handle)) {
3920		ret = PTR_ERR(handle);
3921		mlog_errno(ret);
3922		goto out;
3923	}
3924
3925	ret = ocfs2_insert_extent(handle, et, cpos,
3926			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
3927			num_clusters, ext_flags, meta_ac);
3928	if (ret) {
3929		mlog_errno(ret);
3930		goto out_commit;
3931	}
3932
3933	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3934				      p_cluster, num_clusters,
3935				      meta_ac, dealloc);
3936	if (ret) {
3937		mlog_errno(ret);
3938		goto out_commit;
3939	}
3940
3941	ret = dquot_alloc_space_nodirty(inode,
3942		ocfs2_clusters_to_bytes(osb->sb, num_clusters));
3943	if (ret)
3944		mlog_errno(ret);
3945
3946out_commit:
3947	ocfs2_commit_trans(osb, handle);
3948out:
3949	if (meta_ac)
3950		ocfs2_free_alloc_context(meta_ac);
3951	return ret;
3952}
3953
3954static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3955				       struct buffer_head *s_bh,
3956				       struct inode *t_inode,
3957				       struct buffer_head *t_bh)
3958{
3959	int ret;
3960	handle_t *handle;
3961	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3962	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3963	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3964
3965	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3966
3967	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3968	if (IS_ERR(handle)) {
3969		ret = PTR_ERR(handle);
3970		mlog_errno(ret);
3971		goto out;
3972	}
3973
3974	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3975				      OCFS2_JOURNAL_ACCESS_WRITE);
3976	if (ret) {
3977		mlog_errno(ret);
3978		goto out_commit;
3979	}
3980
3981	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3982	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3983	       le16_to_cpu(s_di->id2.i_data.id_count));
3984	spin_lock(&OCFS2_I(t_inode)->ip_lock);
3985	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3986	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3987	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3988
3989	ocfs2_journal_dirty(handle, t_bh);
3990
3991out_commit:
3992	ocfs2_commit_trans(osb, handle);
3993out:
3994	return ret;
3995}
3996
3997static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3998				struct inode *t_inode,
3999				struct buffer_head *t_bh,
4000				struct ocfs2_caching_info *ref_ci,
4001				struct buffer_head *ref_root_bh,
4002				struct ocfs2_cached_dealloc_ctxt *dealloc)
4003{
4004	int ret = 0;
4005	u32 p_cluster, num_clusters, clusters, cpos;
4006	loff_t size;
4007	unsigned int ext_flags;
4008	struct ocfs2_extent_tree et;
4009
4010	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
4011
4012	size = i_size_read(s_inode);
4013	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
4014
4015	cpos = 0;
4016	while (cpos < clusters) {
4017		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
4018					 &num_clusters, &ext_flags);
4019		if (ret) {
4020			mlog_errno(ret);
4021			goto out;
4022		}
4023		if (p_cluster) {
4024			ret = ocfs2_add_refcounted_extent(t_inode, &et,
4025							  ref_ci, ref_root_bh,
4026							  cpos, p_cluster,
4027							  num_clusters,
4028							  ext_flags,
4029							  dealloc);
4030			if (ret) {
4031				mlog_errno(ret);
4032				goto out;
4033			}
4034		}
4035
4036		cpos += num_clusters;
4037	}
4038
4039out:
4040	return ret;
4041}
4042
4043/*
4044 * Change the new file's attributes to match the source.
4045 *
4046 * reflink creates a snapshot of a file; that means the attributes
4047 * must be identical except for three exceptions: nlink, ino, and ctime.
4048 */
4049static int ocfs2_complete_reflink(struct inode *s_inode,
4050				  struct buffer_head *s_bh,
4051				  struct inode *t_inode,
4052				  struct buffer_head *t_bh,
4053				  bool preserve)
4054{
4055	int ret;
4056	handle_t *handle;
4057	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
4058	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
4059	loff_t size = i_size_read(s_inode);
4060
4061	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
4062				   OCFS2_INODE_UPDATE_CREDITS);
4063	if (IS_ERR(handle)) {
4064		ret = PTR_ERR(handle);
4065		mlog_errno(ret);
4066		return ret;
4067	}
4068
4069	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
4070				      OCFS2_JOURNAL_ACCESS_WRITE);
4071	if (ret) {
4072		mlog_errno(ret);
4073		goto out_commit;
4074	}
4075
4076	spin_lock(&OCFS2_I(t_inode)->ip_lock);
4077	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
4078	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
4079	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
4080	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
4081	i_size_write(t_inode, size);
4082	t_inode->i_blocks = s_inode->i_blocks;
4083
4084	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
4085	di->i_clusters = s_di->i_clusters;
4086	di->i_size = s_di->i_size;
4087	di->i_dyn_features = s_di->i_dyn_features;
4088	di->i_attr = s_di->i_attr;
4089
4090	if (preserve) {
4091		t_inode->i_uid = s_inode->i_uid;
4092		t_inode->i_gid = s_inode->i_gid;
4093		t_inode->i_mode = s_inode->i_mode;
4094		di->i_uid = s_di->i_uid;
4095		di->i_gid = s_di->i_gid;
4096		di->i_mode = s_di->i_mode;
4097
4098		/*
4099		 * Update the timestamps.
4100		 * We want mtime to appear identical to the source, while
4101		 * ctime is updated.
4102		 */
4103		t_inode->i_ctime = current_time(t_inode);
4104
4105		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
4106		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
4107
4108		t_inode->i_mtime = s_inode->i_mtime;
4109		di->i_mtime = s_di->i_mtime;
4110		di->i_mtime_nsec = s_di->i_mtime_nsec;
4111	}
4112
4113	ocfs2_journal_dirty(handle, t_bh);
4114
4115out_commit:
4116	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
4117	return ret;
4118}
4119
4120static int ocfs2_create_reflink_node(struct inode *s_inode,
4121				     struct buffer_head *s_bh,
4122				     struct inode *t_inode,
4123				     struct buffer_head *t_bh,
4124				     bool preserve)
4125{
4126	int ret;
4127	struct buffer_head *ref_root_bh = NULL;
4128	struct ocfs2_cached_dealloc_ctxt dealloc;
4129	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
4130	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
4131	struct ocfs2_refcount_tree *ref_tree;
4132
4133	ocfs2_init_dealloc_ctxt(&dealloc);
4134
4135	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4136				      le64_to_cpu(di->i_refcount_loc));
4137	if (ret) {
4138		mlog_errno(ret);
4139		goto out;
4140	}
4141
4142	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4143		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4144						  t_inode, t_bh);
4145		if (ret)
4146			mlog_errno(ret);
4147		goto out;
4148	}
4149
4150	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4151				       1, &ref_tree, &ref_root_bh);
4152	if (ret) {
4153		mlog_errno(ret);
4154		goto out;
4155	}
4156
4157	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
4158					  &ref_tree->rf_ci, ref_root_bh,
4159					  &dealloc);
4160	if (ret) {
4161		mlog_errno(ret);
4162		goto out_unlock_refcount;
4163	}
4164
4165out_unlock_refcount:
4166	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4167	brelse(ref_root_bh);
4168out:
4169	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4170		ocfs2_schedule_truncate_log_flush(osb, 1);
4171		ocfs2_run_deallocs(osb, &dealloc);
4172	}
4173
4174	return ret;
4175}
4176
4177static int __ocfs2_reflink(struct dentry *old_dentry,
4178			   struct buffer_head *old_bh,
4179			   struct inode *new_inode,
4180			   bool preserve)
4181{
4182	int ret;
4183	struct inode *inode = d_inode(old_dentry);
4184	struct buffer_head *new_bh = NULL;
4185
4186	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
4187		ret = -EINVAL;
4188		mlog_errno(ret);
4189		goto out;
4190	}
4191
4192	ret = filemap_fdatawrite(inode->i_mapping);
4193	if (ret) {
4194		mlog_errno(ret);
4195		goto out;
4196	}
4197
4198	ret = ocfs2_attach_refcount_tree(inode, old_bh);
4199	if (ret) {
4200		mlog_errno(ret);
4201		goto out;
4202	}
4203
4204	inode_lock_nested(new_inode, I_MUTEX_CHILD);
4205	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4206				      OI_LS_REFLINK_TARGET);
4207	if (ret) {
4208		mlog_errno(ret);
4209		goto out_unlock;
4210	}
4211
4212	ret = ocfs2_create_reflink_node(inode, old_bh,
4213					new_inode, new_bh, preserve);
4214	if (ret) {
4215		mlog_errno(ret);
4216		goto inode_unlock;
4217	}
4218
4219	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
4220		ret = ocfs2_reflink_xattrs(inode, old_bh,
4221					   new_inode, new_bh,
4222					   preserve);
4223		if (ret) {
4224			mlog_errno(ret);
4225			goto inode_unlock;
4226		}
4227	}
4228
4229	ret = ocfs2_complete_reflink(inode, old_bh,
4230				     new_inode, new_bh, preserve);
4231	if (ret)
4232		mlog_errno(ret);
4233
4234inode_unlock:
4235	ocfs2_inode_unlock(new_inode, 1);
4236	brelse(new_bh);
4237out_unlock:
4238	inode_unlock(new_inode);
4239out:
4240	if (!ret) {
4241		ret = filemap_fdatawait(inode->i_mapping);
4242		if (ret)
4243			mlog_errno(ret);
4244	}
4245	return ret;
4246}
4247
4248static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4249			 struct dentry *new_dentry, bool preserve)
4250{
4251	int error, had_lock;
4252	struct inode *inode = d_inode(old_dentry);
4253	struct buffer_head *old_bh = NULL;
4254	struct inode *new_orphan_inode = NULL;
4255	struct ocfs2_lock_holder oh;
4256
4257	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4258		return -EOPNOTSUPP;
4259
4260
4261	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
4262					     &new_orphan_inode);
4263	if (error) {
4264		mlog_errno(error);
4265		goto out;
4266	}
4267
4268	error = ocfs2_rw_lock(inode, 1);
4269	if (error) {
4270		mlog_errno(error);
4271		goto out;
4272	}
4273
4274	error = ocfs2_inode_lock(inode, &old_bh, 1);
4275	if (error) {
4276		mlog_errno(error);
4277		ocfs2_rw_unlock(inode, 1);
4278		goto out;
4279	}
4280
4281	down_write(&OCFS2_I(inode)->ip_xattr_sem);
4282	down_write(&OCFS2_I(inode)->ip_alloc_sem);
4283	error = __ocfs2_reflink(old_dentry, old_bh,
4284				new_orphan_inode, preserve);
4285	up_write(&OCFS2_I(inode)->ip_alloc_sem);
4286	up_write(&OCFS2_I(inode)->ip_xattr_sem);
4287
4288	ocfs2_inode_unlock(inode, 1);
4289	ocfs2_rw_unlock(inode, 1);
4290	brelse(old_bh);
4291
4292	if (error) {
4293		mlog_errno(error);
4294		goto out;
4295	}
4296
4297	had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
4298					    &oh);
4299	if (had_lock < 0) {
4300		error = had_lock;
4301		mlog_errno(error);
4302		goto out;
4303	}
4304
4305	/* If the security isn't preserved, we need to re-initialize the security and ACLs. */
4306	if (!preserve) {
4307		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
4308						    &new_dentry->d_name);
4309		if (error)
4310			mlog_errno(error);
4311	}
4312	if (!error) {
4313		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
4314						       new_dentry);
4315		if (error)
4316			mlog_errno(error);
4317	}
4318	ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
4319
4320out:
4321	if (new_orphan_inode) {
4322		/*
4323		 * We need to open_unlock the inode no matter whether we
4324		 * succeed or not, so that other nodes can delete it later.
4325		 */
4326		ocfs2_open_unlock(new_orphan_inode);
4327		if (error)
4328			iput(new_orphan_inode);
4329	}
4330
4331	return error;
4332}
4333
4334/*
4335 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
4336 * sys_reflink().  This will go away when vfs_reflink() exists in
4337 * fs/namei.c.
4338 */
4339
4340/* copied from may_create in VFS. */
4341static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
4342{
4343	if (d_really_is_positive(child))
4344		return -EEXIST;
4345	if (IS_DEADDIR(dir))
4346		return -ENOENT;
4347	return inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
4348}
4349
4350/**
4351 * ocfs2_vfs_reflink - Create a reference-counted link
4352 *
4353 * @old_dentry:        source dentry + inode
4354 * @dir:       directory to create the target
4355 * @new_dentry:        target dentry
4356 * @preserve:  if true, preserve all file attributes
4357 */
4358static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
4359			     struct dentry *new_dentry, bool preserve)
4360{
4361	struct inode *inode = d_inode(old_dentry);
4362	int error;
4363
4364	if (!inode)
4365		return -ENOENT;
4366
4367	error = ocfs2_may_create(dir, new_dentry);
4368	if (error)
4369		return error;
4370
4371	if (dir->i_sb != inode->i_sb)
4372		return -EXDEV;
4373
4374	/*
4375	 * A reflink to an append-only or immutable file cannot be created.
4376	 */
4377	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4378		return -EPERM;
4379
4380	/* Only regular files can be reflinked. */
4381	if (!S_ISREG(inode->i_mode))
4382		return -EPERM;
4383
4384	/*
4385	 * If the caller wants to preserve ownership, they require the
4386	 * rights to do so.
4387	 */
4388	if (preserve) {
4389		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
4390			return -EPERM;
4391		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
4392			return -EPERM;
4393	}
4394
4395	/*
4396	 * If the caller is modifying any aspect of the attributes, they
4397	 * are not creating a snapshot.  They need read permission on the
4398	 * file.
4399	 */
4400	if (!preserve) {
4401		error = inode_permission(&init_user_ns, inode, MAY_READ);
4402		if (error)
4403			return error;
4404	}
4405
4406	inode_lock(inode);
4407	error = dquot_initialize(dir);
4408	if (!error)
4409		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
4410	inode_unlock(inode);
4411	if (!error)
4412		fsnotify_create(dir, new_dentry);
4413	return error;
4414}
4415/*
4416 * Most of this code is copied from sys_linkat.
4417 */
4418int ocfs2_reflink_ioctl(struct inode *inode,
4419			const char __user *oldname,
4420			const char __user *newname,
4421			bool preserve)
4422{
4423	struct dentry *new_dentry;
4424	struct path old_path, new_path;
4425	int error;
4426
4427	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4428		return -EOPNOTSUPP;
4429
4430	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
4431	if (error) {
4432		mlog_errno(error);
4433		return error;
4434	}
4435
4436	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
4437	error = PTR_ERR(new_dentry);
4438	if (IS_ERR(new_dentry)) {
4439		mlog_errno(error);
4440		goto out;
4441	}
4442
4443	error = -EXDEV;
4444	if (old_path.mnt != new_path.mnt) {
4445		mlog_errno(error);
4446		goto out_dput;
4447	}
4448
4449	error = ocfs2_vfs_reflink(old_path.dentry,
4450				  d_inode(new_path.dentry),
4451				  new_dentry, preserve);
4452out_dput:
4453	done_path_create(&new_path, new_dentry);
4454out:
4455	path_put(&old_path);
4456
4457	return error;
4458}
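
/*
 * Userspace sketch of driving the ioctl above (a hedged illustration;
 * it assumes the reflink_arguments layout from ocfs2_fs.h):
 *
 *	struct reflink_arguments args = {
 *		.old_path = (__u64)(unsigned long)"/mnt/ocfs2/src",
 *		.new_path = (__u64)(unsigned long)"/mnt/ocfs2/snap",
 *		.preserve = 1,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_REFLINK, &args) < 0)
 *		perror("OCFS2_IOC_REFLINK");
 */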
4459
4460/* Update destination inode size, if necessary. */
4461int ocfs2_reflink_update_dest(struct inode *dest,
4462			      struct buffer_head *d_bh,
4463			      loff_t newlen)
4464{
4465	handle_t *handle;
4466	int ret;
4467
4468	dest->i_blocks = ocfs2_inode_sector_count(dest);
4469
4470	if (newlen <= i_size_read(dest))
4471		return 0;
4472
4473	handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
4474				   OCFS2_INODE_UPDATE_CREDITS);
4475	if (IS_ERR(handle)) {
4476		ret = PTR_ERR(handle);
4477		mlog_errno(ret);
4478		return ret;
4479	}
4480
4481	/* Extend i_size if needed. */
4482	spin_lock(&OCFS2_I(dest)->ip_lock);
4483	if (newlen > i_size_read(dest))
4484		i_size_write(dest, newlen);
4485	spin_unlock(&OCFS2_I(dest)->ip_lock);
4486	dest->i_ctime = dest->i_mtime = current_time(dest);
4487
4488	ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
4489	if (ret) {
4490		mlog_errno(ret);
4491		goto out_commit;
4492	}
4493
4494out_commit:
4495	ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
4496	return ret;
4497}
4498
4499/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
4500static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
4501					 struct buffer_head *s_bh,
4502					 loff_t pos_in,
4503					 struct inode *t_inode,
4504					 struct buffer_head *t_bh,
4505					 loff_t pos_out,
4506					 loff_t len,
4507					 struct ocfs2_cached_dealloc_ctxt *dealloc)
4508{
4509	struct ocfs2_extent_tree s_et;
4510	struct ocfs2_extent_tree t_et;
4511	struct ocfs2_dinode *dis;
4512	struct buffer_head *ref_root_bh = NULL;
4513	struct ocfs2_refcount_tree *ref_tree;
4514	struct ocfs2_super *osb;
4515	loff_t remapped_bytes = 0;
4516	loff_t pstart, plen;
4517	u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
4518	unsigned int ext_flags;
4519	int ret = 0;
4520
4521	osb = OCFS2_SB(s_inode->i_sb);
4522	dis = (struct ocfs2_dinode *)s_bh->b_data;
4523	ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
4524	ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);
4525
4526	spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
4527	tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
4528	slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);
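	/*
	 * Note the rounding: ocfs2_bytes_to_clusters() rounds down while
	 * ocfs2_clusters_for_bytes() rounds up, so with a (hypothetical)
	 * 4KB cluster size, pos_in = 6144 and len = 10000 yield spos = 1
	 * and slast = 4, covering the whole source byte range.
	 */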
4529
4530	while (spos < slast) {
4531		if (fatal_signal_pending(current)) {
4532			ret = -EINTR;
4533			goto out;
4534		}
4535
4536		/* Look up the extent. */
4537		ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
4538					 &num_clusters, &ext_flags);
4539		if (ret) {
4540			mlog_errno(ret);
4541			goto out;
4542		}
4543
4544		num_clusters = min_t(u32, num_clusters, slast - spos);
4545
4546		/* Punch out the dest range. */
4547		pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
4548		plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
4549		ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
4550		if (ret) {
4551			mlog_errno(ret);
4552			goto out;
4553		}
4554
4555		if (p_cluster == 0)
4556			goto next_loop;
4557
4558		/* Lock the refcount btree... */
4559		ret = ocfs2_lock_refcount_tree(osb,
4560					       le64_to_cpu(dis->i_refcount_loc),
4561					       1, &ref_tree, &ref_root_bh);
4562		if (ret) {
4563			mlog_errno(ret);
4564			goto out;
4565		}
4566
4567		/* Mark s_inode's extent as refcounted. */
4568		if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
4569			ret = ocfs2_add_refcount_flag(s_inode, &s_et,
4570						      &ref_tree->rf_ci,
4571						      ref_root_bh, spos,
4572						      p_cluster, num_clusters,
4573						      dealloc, NULL);
4574			if (ret) {
4575				mlog_errno(ret);
4576				goto out_unlock_refcount;
4577			}
4578		}
4579
4580		/* Map in the new extent. */
4581		ext_flags |= OCFS2_EXT_REFCOUNTED;
4582		ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
4583						  &ref_tree->rf_ci,
4584						  ref_root_bh,
4585						  tpos, p_cluster,
4586						  num_clusters,
4587						  ext_flags,
4588						  dealloc);
4589		if (ret) {
4590			mlog_errno(ret);
4591			goto out_unlock_refcount;
4592		}
4593
4594		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4595		brelse(ref_root_bh);
4596next_loop:
4597		spos += num_clusters;
4598		tpos += num_clusters;
4599		remapped_clus += num_clusters;
4600	}
4601
4602	goto out;
4603out_unlock_refcount:
4604	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4605	brelse(ref_root_bh);
4606out:
4607	remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
4608	remapped_bytes = min_t(loff_t, len, remapped_bytes);
4609
4610	return remapped_bytes > 0 ? remapped_bytes : ret;
4611}
4612
4613/* Set up refcount tree and remap s_inode to t_inode. */
4614loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
4615				  struct buffer_head *s_bh,
4616				  loff_t pos_in,
4617				  struct inode *t_inode,
4618				  struct buffer_head *t_bh,
4619				  loff_t pos_out,
4620				  loff_t len)
4621{
4622	struct ocfs2_cached_dealloc_ctxt dealloc;
4623	struct ocfs2_super *osb;
4624	struct ocfs2_dinode *dis;
4625	struct ocfs2_dinode *dit;
4626	loff_t ret;
4627
4628	osb = OCFS2_SB(s_inode->i_sb);
4629	dis = (struct ocfs2_dinode *)s_bh->b_data;
4630	dit = (struct ocfs2_dinode *)t_bh->b_data;
4631	ocfs2_init_dealloc_ctxt(&dealloc);
4632
4633	/*
4634	 * If we're reflinking the entire file and the source is inline
4635	 * data, just copy the contents.
4636	 */
4637	if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
4638	    i_size_read(t_inode) <= len &&
4639	    (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
4640		ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
4641		if (ret)
4642			mlog_errno(ret);
4643		goto out;
4644	}
4645
4646	/*
4647	 * If both inodes belong to two different refcount groups then
4648	 * forget it because we don't know how (or want) to go merging
4649	 * refcount trees.
4650	 */
4651	ret = -EOPNOTSUPP;
4652	if (ocfs2_is_refcount_inode(s_inode) &&
4653	    ocfs2_is_refcount_inode(t_inode) &&
4654	    le64_to_cpu(dis->i_refcount_loc) !=
4655	    le64_to_cpu(dit->i_refcount_loc))
4656		goto out;
4657
4658	/* Neither inode has a refcount tree.  Add one to s_inode. */
4659	if (!ocfs2_is_refcount_inode(s_inode) &&
4660	    !ocfs2_is_refcount_inode(t_inode)) {
4661		ret = ocfs2_create_refcount_tree(s_inode, s_bh);
4662		if (ret) {
4663			mlog_errno(ret);
4664			goto out;
4665		}
4666	}
4667
4668	/* Ensure that both inodes end up with the same refcount tree. */
4669	if (!ocfs2_is_refcount_inode(s_inode)) {
4670		ret = ocfs2_set_refcount_tree(s_inode, s_bh,
4671					      le64_to_cpu(dit->i_refcount_loc));
4672		if (ret) {
4673			mlog_errno(ret);
4674			goto out;
4675		}
4676	}
4677	if (!ocfs2_is_refcount_inode(t_inode)) {
4678		ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4679					      le64_to_cpu(dis->i_refcount_loc));
4680		if (ret) {
4681			mlog_errno(ret);
4682			goto out;
4683		}
4684	}
4685
4686	/* Turn off inline data in the dest file. */
4687	if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4688		ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
4689		if (ret) {
4690			mlog_errno(ret);
4691			goto out;
4692		}
4693	}
4694
4695	/* Actually remap extents now. */
4696	ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
4697					 pos_out, len, &dealloc);
4698	if (ret < 0) {
4699		mlog_errno(ret);
4700		goto out;
4701	}
4702
4703out:
4704	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4705		ocfs2_schedule_truncate_log_flush(osb, 1);
4706		ocfs2_run_deallocs(osb, &dealloc);
4707	}
4708
4709	return ret;
4710}
4711
4712/* Lock both inodes and grab buffer heads pointing to them. */
4713int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4714			      struct buffer_head **bh_s,
4715			      struct inode *t_inode,
4716			      struct buffer_head **bh_t)
4717{
4718	struct inode *inode1 = s_inode;
4719	struct inode *inode2 = t_inode;
4720	struct ocfs2_inode_info *oi1;
4721	struct ocfs2_inode_info *oi2;
4722	struct buffer_head *bh1 = NULL;
4723	struct buffer_head *bh2 = NULL;
4724	bool same_inode = (s_inode == t_inode);
4725	bool need_swap = (inode1->i_ino > inode2->i_ino);
4726	int status;
4727
4728	/* First grab the VFS and rw locks. */
4729	lock_two_nondirectories(s_inode, t_inode);
4730	if (need_swap)
4731		swap(inode1, inode2);
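	/*
	 * For example, if s_inode->i_ino is 42 and t_inode->i_ino is 17,
	 * the pair is swapped here so that inode 17 takes its cluster
	 * lock first; the buffer heads are swapped back below before
	 * they are handed to the caller.
	 */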
4732
4733	status = ocfs2_rw_lock(inode1, 1);
4734	if (status) {
4735		mlog_errno(status);
4736		goto out_i1;
4737	}
4738	if (!same_inode) {
4739		status = ocfs2_rw_lock(inode2, 1);
4740		if (status) {
4741			mlog_errno(status);
4742			goto out_i2;
4743		}
4744	}
4745
4746	/* Now go for the cluster locks */
4747	oi1 = OCFS2_I(inode1);
4748	oi2 = OCFS2_I(inode2);
4749
4750	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
4751				(unsigned long long)oi2->ip_blkno);
4752
4753	/* We always want to lock the one with the lower lockid first. */
4754	if (oi1->ip_blkno > oi2->ip_blkno)
4755		mlog_errno(-ENOLCK);
4756
4757	/* lock id1 */
4758	status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
4759					 OI_LS_REFLINK_TARGET);
4760	if (status < 0) {
4761		if (status != -ENOENT)
4762			mlog_errno(status);
4763		goto out_rw2;
4764	}
4765
4766	/* lock id2 */
4767	if (!same_inode) {
4768		status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
4769						 OI_LS_REFLINK_TARGET);
4770		if (status < 0) {
4771			if (status != -ENOENT)
4772				mlog_errno(status);
4773			goto out_cl1;
4774		}
4775	} else {
4776		bh2 = bh1;
4777	}
4778
4779	/*
4780	 * If we swapped inode order above, we have to swap the buffer heads
4781	 * before passing them back to the caller.
4782	 */
4783	if (need_swap)
4784		swap(bh1, bh2);
4785	*bh_s = bh1;
4786	*bh_t = bh2;
4787
4788	trace_ocfs2_double_lock_end(
4789			(unsigned long long)oi1->ip_blkno,
4790			(unsigned long long)oi2->ip_blkno);
4791
4792	return 0;
4793
4794out_cl1:
4795	ocfs2_inode_unlock(inode1, 1);
4796	brelse(bh1);
4797out_rw2:
4798	ocfs2_rw_unlock(inode2, 1);
4799out_i2:
4800	ocfs2_rw_unlock(inode1, 1);
4801out_i1:
4802	unlock_two_nondirectories(s_inode, t_inode);
4803	return status;
4804}
4805
4806/* Unlock both inodes and release buffers. */
4807void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
4808				 struct buffer_head *s_bh,
4809				 struct inode *t_inode,
4810				 struct buffer_head *t_bh)
4811{
4812	ocfs2_inode_unlock(s_inode, 1);
4813	ocfs2_rw_unlock(s_inode, 1);
4814	brelse(s_bh);
4815	if (s_inode != t_inode) {
4816		ocfs2_inode_unlock(t_inode, 1);
4817		ocfs2_rw_unlock(t_inode, 1);
4818		brelse(t_bh);
4819	}
4820	unlock_two_nondirectories(s_inode, t_inode);
4821}