   1// SPDX-License-Identifier: GPL-2.0-only
   2/* -*- mode: c; c-basic-offset: 8; -*-
   3 * vim: noexpandtab sw=8 ts=8 sts=0:
   4 *
   5 * refcounttree.c
   6 *
    7 * Copyright (C) 2009 Oracle.  All rights reserved.
    8 */
   9
  10#include <linux/sort.h>
  11#include <cluster/masklog.h>
  12#include "ocfs2.h"
  13#include "inode.h"
  14#include "alloc.h"
  15#include "suballoc.h"
  16#include "journal.h"
  17#include "uptodate.h"
  18#include "super.h"
  19#include "buffer_head_io.h"
  20#include "blockcheck.h"
  21#include "refcounttree.h"
  22#include "sysfile.h"
  23#include "dlmglue.h"
  24#include "extent_map.h"
  25#include "aops.h"
  26#include "xattr.h"
  27#include "namei.h"
  28#include "ocfs2_trace.h"
  29#include "file.h"
  30
  31#include <linux/bio.h>
  32#include <linux/blkdev.h>
  33#include <linux/slab.h>
  34#include <linux/writeback.h>
  35#include <linux/pagevec.h>
  36#include <linux/swap.h>
  37#include <linux/security.h>
  38#include <linux/fsnotify.h>
  39#include <linux/quotaops.h>
  40#include <linux/namei.h>
  41#include <linux/mount.h>
  42#include <linux/posix_acl.h>
  43
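/*
 * Context for one copy-on-write pass (editor's summary, not an upstream
 * comment): [cow_start, cow_start + cow_len) bounds the region being
 * CoWed, in virtual clusters.  The get_clusters and cow_duplicate_clusters
 * hooks let the same CoW engine serve both regular file data and xattr
 * value trees; cow_object and post_refcount carry the per-caller state for
 * the latter case.
 */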
  44struct ocfs2_cow_context {
  45	struct inode *inode;
  46	u32 cow_start;
  47	u32 cow_len;
  48	struct ocfs2_extent_tree data_et;
  49	struct ocfs2_refcount_tree *ref_tree;
  50	struct buffer_head *ref_root_bh;
  51	struct ocfs2_alloc_context *meta_ac;
  52	struct ocfs2_alloc_context *data_ac;
  53	struct ocfs2_cached_dealloc_ctxt dealloc;
  54	void *cow_object;
  55	struct ocfs2_post_refcount *post_refcount;
  56	int extra_credits;
  57	int (*get_clusters)(struct ocfs2_cow_context *context,
  58			    u32 v_cluster, u32 *p_cluster,
  59			    u32 *num_clusters,
  60			    unsigned int *extent_flags);
  61	int (*cow_duplicate_clusters)(handle_t *handle,
  62				      struct inode *inode,
  63				      u32 cpos, u32 old_cluster,
  64				      u32 new_cluster, u32 new_len);
  65};
  66
  67static inline struct ocfs2_refcount_tree *
  68cache_info_to_refcount(struct ocfs2_caching_info *ci)
  69{
  70	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  71}
  72
  73static int ocfs2_validate_refcount_block(struct super_block *sb,
  74					 struct buffer_head *bh)
  75{
  76	int rc;
  77	struct ocfs2_refcount_block *rb =
  78		(struct ocfs2_refcount_block *)bh->b_data;
  79
  80	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
  81
  82	BUG_ON(!buffer_uptodate(bh));
  83
  84	/*
   85	 * If the ECC fails, we return the error but otherwise
  86	 * leave the filesystem running.  We know any error is
  87	 * local to this block.
  88	 */
  89	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  90	if (rc) {
  91		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  92		     (unsigned long long)bh->b_blocknr);
  93		return rc;
  94	}
  95
  96
  97	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
  98		rc = ocfs2_error(sb,
  99				 "Refcount block #%llu has bad signature %.*s\n",
 100				 (unsigned long long)bh->b_blocknr, 7,
 101				 rb->rf_signature);
 102		goto out;
 103	}
 104
 105	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
 106		rc = ocfs2_error(sb,
 107				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
 108				 (unsigned long long)bh->b_blocknr,
 109				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
  110		goto out;
  111	}
 112
 113	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
 114		rc = ocfs2_error(sb,
 115				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
 116				 (unsigned long long)bh->b_blocknr,
 117				 le32_to_cpu(rb->rf_fs_generation));
  118		goto out;
  119	}
 120out:
 121	return rc;
 122}
 123
 124static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
 125				     u64 rb_blkno,
 126				     struct buffer_head **bh)
 127{
 128	int rc;
 129	struct buffer_head *tmp = *bh;
 130
 131	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
 132			      ocfs2_validate_refcount_block);
 133
 134	/* If ocfs2_read_block() got us a new bh, pass it up. */
 135	if (!rc && !*bh)
 136		*bh = tmp;
 137
 138	return rc;
 139}
 140
 141static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
 142{
 143	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 144
 145	return rf->rf_blkno;
 146}
 147
 148static struct super_block *
 149ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
 150{
 151	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 152
 153	return rf->rf_sb;
 154}
 155
 156static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
 157{
 158	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 159
 160	spin_lock(&rf->rf_lock);
 161}
 162
 163static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
 164{
 165	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 166
 167	spin_unlock(&rf->rf_lock);
 168}
 169
 170static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
 171{
 172	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 173
 174	mutex_lock(&rf->rf_io_mutex);
 175}
 176
 177static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
 178{
 179	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 180
 181	mutex_unlock(&rf->rf_io_mutex);
 182}
 183
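/*
 * A refcount tree embeds an ocfs2_caching_info, so refcount blocks go
 * through the same generic metadata cache and buffer I/O paths as inode
 * metadata.  These callbacks supply the cache with its owning block
 * number, its superblock, and the locks above.
 */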
 184static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
 185	.co_owner		= ocfs2_refcount_cache_owner,
 186	.co_get_super		= ocfs2_refcount_cache_get_super,
 187	.co_cache_lock		= ocfs2_refcount_cache_lock,
 188	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
 189	.co_io_lock		= ocfs2_refcount_cache_io_lock,
 190	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
 191};
 192
 193static struct ocfs2_refcount_tree *
 194ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
 195{
 196	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
 197	struct ocfs2_refcount_tree *tree = NULL;
 198
 199	while (n) {
 200		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
 201
 202		if (blkno < tree->rf_blkno)
 203			n = n->rb_left;
 204		else if (blkno > tree->rf_blkno)
 205			n = n->rb_right;
 206		else
 207			return tree;
 208	}
 209
 210	return NULL;
 211}
 212
  213/* Caller must already hold osb_lock. */
 214static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
 215				       struct ocfs2_refcount_tree *new)
 216{
 217	u64 rf_blkno = new->rf_blkno;
 218	struct rb_node *parent = NULL;
 219	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
 220	struct ocfs2_refcount_tree *tmp;
 221
 222	while (*p) {
 223		parent = *p;
 224
 225		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
 226			       rf_node);
 227
 228		if (rf_blkno < tmp->rf_blkno)
 229			p = &(*p)->rb_left;
 230		else if (rf_blkno > tmp->rf_blkno)
 231			p = &(*p)->rb_right;
 232		else {
 233			/* This should never happen! */
 234			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
 235			     (unsigned long long)rf_blkno);
 236			BUG();
 237		}
 238	}
 239
 240	rb_link_node(&new->rf_node, parent, p);
 241	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
 242}
 243
 244static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
 245{
 246	ocfs2_metadata_cache_exit(&tree->rf_ci);
 247	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
 248	ocfs2_lock_res_free(&tree->rf_lockres);
 249	kfree(tree);
 250}
 251
 252static inline void
 253ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
 254					struct ocfs2_refcount_tree *tree)
 255{
 256	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
 257	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
 258		osb->osb_ref_tree_lru = NULL;
 259}
 260
 261static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
 262					struct ocfs2_refcount_tree *tree)
 263{
 264	spin_lock(&osb->osb_lock);
 265	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 266	spin_unlock(&osb->osb_lock);
 267}
 268
 269static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
 270{
 271	struct ocfs2_refcount_tree *tree =
 272		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
 273
 274	ocfs2_free_refcount_tree(tree);
 275}
 276
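/*
 * Lifetime of the tree object itself (editor's summary of the helpers
 * below and their callers): ocfs2_get_refcount_tree() creates the tree
 * with kref_init() (count 1, owned by the rb-tree/LRU), every
 * ocfs2_lock_refcount_tree() takes an extra get that the matching unlock
 * puts, and when a stale tree is erased from the rb-tree the creation
 * reference is dropped too, so the final put frees it via
 * ocfs2_kref_remove_refcount_tree().
 */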
 277static inline void
 278ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
 279{
 280	kref_get(&tree->rf_getcnt);
 281}
 282
 283static inline void
 284ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
 285{
 286	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
 287}
 288
 289static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
 290					       struct super_block *sb)
 291{
 292	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
 293	mutex_init(&new->rf_io_mutex);
 294	new->rf_sb = sb;
 295	spin_lock_init(&new->rf_lock);
 296}
 297
 298static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
 299					struct ocfs2_refcount_tree *new,
 300					u64 rf_blkno, u32 generation)
 301{
 302	init_rwsem(&new->rf_sem);
 303	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
 304				     rf_blkno, generation);
 305}
 306
 307static struct ocfs2_refcount_tree*
 308ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
 309{
 310	struct ocfs2_refcount_tree *new;
 311
 312	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
 313	if (!new)
 314		return NULL;
 315
 316	new->rf_blkno = rf_blkno;
 317	kref_init(&new->rf_getcnt);
 318	ocfs2_init_refcount_tree_ci(new, osb->sb);
 319
 320	return new;
 321}
 322
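/*
 * Look up (or create) the in-memory tree object for rf_blkno.  The shape
 * of the code below is the classic optimistic-allocation pattern: search
 * under osb_lock (with a one-entry LRU shortcut), drop the lock to
 * allocate and to read the generation from disk, then retake the lock and
 * re-search before inserting, freeing our copy if another thread won the
 * race.
 */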
 323static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
 324				   struct ocfs2_refcount_tree **ret_tree)
 325{
 326	int ret = 0;
 327	struct ocfs2_refcount_tree *tree, *new = NULL;
 328	struct buffer_head *ref_root_bh = NULL;
 329	struct ocfs2_refcount_block *ref_rb;
 330
 331	spin_lock(&osb->osb_lock);
 332	if (osb->osb_ref_tree_lru &&
 333	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
 334		tree = osb->osb_ref_tree_lru;
 335	else
 336		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 337	if (tree)
 338		goto out;
 339
 340	spin_unlock(&osb->osb_lock);
 341
 342	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
 343	if (!new) {
 344		ret = -ENOMEM;
 345		mlog_errno(ret);
 346		return ret;
 347	}
 348	/*
  349	 * We need the generation to create the refcount tree lock, and since
  350	 * it isn't changed during tree modification, we can safely read it
  351	 * here without protection.
  352	 * We also have to purge the cache after we create the lock, since the
  353	 * refcount block may contain stale data. It can only be trusted once
  354	 * we hold the refcount lock.
 355	 */
 356	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
 357	if (ret) {
 358		mlog_errno(ret);
 359		ocfs2_metadata_cache_exit(&new->rf_ci);
 360		kfree(new);
 361		return ret;
 362	}
 363
 364	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 365	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
 366	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
 367				      new->rf_generation);
 368	ocfs2_metadata_cache_purge(&new->rf_ci);
 369
 370	spin_lock(&osb->osb_lock);
 371	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 372	if (tree)
 373		goto out;
 374
 375	ocfs2_insert_refcount_tree(osb, new);
 376
 377	tree = new;
 378	new = NULL;
 379
 380out:
 381	*ret_tree = tree;
 382
 383	osb->osb_ref_tree_lru = tree;
 384
 385	spin_unlock(&osb->osb_lock);
 386
 387	if (new)
 388		ocfs2_free_refcount_tree(new);
 389
 390	brelse(ref_root_bh);
 391	return ret;
 392}
 393
 394static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
 395{
 396	int ret;
 397	struct buffer_head *di_bh = NULL;
 398	struct ocfs2_dinode *di;
 399
 400	ret = ocfs2_read_inode_block(inode, &di_bh);
 401	if (ret) {
 402		mlog_errno(ret);
 403		goto out;
 404	}
 405
 406	BUG_ON(!ocfs2_is_refcount_inode(inode));
 407
 408	di = (struct ocfs2_dinode *)di_bh->b_data;
 409	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
 410	brelse(di_bh);
 411out:
 412	return ret;
 413}
 414
 415static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 416				      struct ocfs2_refcount_tree *tree, int rw)
 417{
 418	int ret;
 419
 420	ret = ocfs2_refcount_lock(tree, rw);
 421	if (ret) {
 422		mlog_errno(ret);
 423		goto out;
 424	}
 425
 426	if (rw)
 427		down_write(&tree->rf_sem);
 428	else
 429		down_read(&tree->rf_sem);
 430
 431out:
 432	return ret;
 433}
 434
 435/*
  436 * Lock the refcount tree pointed to by ref_blkno and return the tree.
  437 * In most cases, callers that lock the tree also read the refcount
  438 * block, so read it here if the caller really needs it.
  439 *
  440 * If the tree has been re-created by another node, this function frees
  441 * the old one and re-creates it.
 442 */
 443int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 444			     u64 ref_blkno, int rw,
 445			     struct ocfs2_refcount_tree **ret_tree,
 446			     struct buffer_head **ref_bh)
 447{
 448	int ret, delete_tree = 0;
 449	struct ocfs2_refcount_tree *tree = NULL;
 450	struct buffer_head *ref_root_bh = NULL;
 451	struct ocfs2_refcount_block *rb;
 452
 453again:
 454	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
 455	if (ret) {
 456		mlog_errno(ret);
 457		return ret;
 458	}
 459
 460	ocfs2_refcount_tree_get(tree);
 461
 462	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
 463	if (ret) {
 464		mlog_errno(ret);
 465		ocfs2_refcount_tree_put(tree);
 466		goto out;
 467	}
 468
 469	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
 470					&ref_root_bh);
 471	if (ret) {
 472		mlog_errno(ret);
  473		ocfs2_unlock_refcount_tree(osb, tree, rw);
  474		goto out;
 475	}
 476
 477	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 478	/*
 479	 * If the refcount block has been freed and re-created, we may need
 480	 * to recreate the refcount tree also.
 481	 *
 482	 * Here we just remove the tree from the rb-tree, and the last
 483	 * kref holder will unlock and delete this refcount_tree.
 484	 * Then we goto "again" and ocfs2_get_refcount_tree will create
 485	 * the new refcount tree for us.
 486	 */
 487	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
 488		if (!tree->rf_removed) {
 489			ocfs2_erase_refcount_tree_from_list(osb, tree);
 490			tree->rf_removed = 1;
 491			delete_tree = 1;
 492		}
 493
 494		ocfs2_unlock_refcount_tree(osb, tree, rw);
 495		/*
 496		 * We get an extra reference when we create the refcount
 497		 * tree, so another put will destroy it.
 498		 */
 499		if (delete_tree)
 500			ocfs2_refcount_tree_put(tree);
 501		brelse(ref_root_bh);
 502		ref_root_bh = NULL;
 503		goto again;
 504	}
 505
 506	*ret_tree = tree;
 507	if (ref_bh) {
 508		*ref_bh = ref_root_bh;
 509		ref_root_bh = NULL;
 510	}
 511out:
 512	brelse(ref_root_bh);
 513	return ret;
 514}
 515
 516void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
 517				struct ocfs2_refcount_tree *tree, int rw)
 518{
 519	if (rw)
 520		up_write(&tree->rf_sem);
 521	else
 522		up_read(&tree->rf_sem);
 523
 524	ocfs2_refcount_unlock(tree, rw);
 525	ocfs2_refcount_tree_put(tree);
 526}
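
/*
 * Illustrative caller of the lock/unlock pair above (editor's sketch,
 * not code from this file; compare ocfs2_set_refcount_tree() below):
 *
 *	struct ocfs2_refcount_tree *tree;
 *	struct buffer_head *ref_root_bh = NULL;
 *
 *	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1,
 *				       &tree, &ref_root_bh);
 *	if (ret)
 *		return ret;
 *	... modify refcount records under rf_sem ...
 *	ocfs2_unlock_refcount_tree(osb, tree, 1);
 *	brelse(ref_root_bh);
 */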
 527
 528void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
 529{
 530	struct rb_node *node;
 531	struct ocfs2_refcount_tree *tree;
 532	struct rb_root *root = &osb->osb_rf_lock_tree;
 533
 534	while ((node = rb_last(root)) != NULL) {
 535		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
 536
 537		trace_ocfs2_purge_refcount_trees(
 538				(unsigned long long) tree->rf_blkno);
 539
 540		rb_erase(&tree->rf_node, root);
 541		ocfs2_free_refcount_tree(tree);
 542	}
 543}
 544
 545/*
 546 * Create a refcount tree for an inode.
 547 * We take for granted that the inode is already locked.
 548 */
 549static int ocfs2_create_refcount_tree(struct inode *inode,
 550				      struct buffer_head *di_bh)
 551{
 552	int ret;
 553	handle_t *handle = NULL;
 554	struct ocfs2_alloc_context *meta_ac = NULL;
 555	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 556	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 557	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 558	struct buffer_head *new_bh = NULL;
 559	struct ocfs2_refcount_block *rb;
 560	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
 561	u16 suballoc_bit_start;
 562	u32 num_got;
 563	u64 suballoc_loc, first_blkno;
 564
 565	BUG_ON(ocfs2_is_refcount_inode(inode));
 566
 567	trace_ocfs2_create_refcount_tree(
 568		(unsigned long long)oi->ip_blkno);
 569
 570	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
 571	if (ret) {
 572		mlog_errno(ret);
 573		goto out;
 574	}
 575
 576	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
 577	if (IS_ERR(handle)) {
 578		ret = PTR_ERR(handle);
 579		mlog_errno(ret);
 580		goto out;
 581	}
 582
 583	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 584				      OCFS2_JOURNAL_ACCESS_WRITE);
 585	if (ret) {
 586		mlog_errno(ret);
 587		goto out_commit;
 588	}
 589
 590	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
 591				   &suballoc_bit_start, &num_got,
 592				   &first_blkno);
 593	if (ret) {
 594		mlog_errno(ret);
 595		goto out_commit;
 596	}
 597
 598	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
 599	if (!new_tree) {
 600		ret = -ENOMEM;
 601		mlog_errno(ret);
 602		goto out_commit;
 603	}
 604
 605	new_bh = sb_getblk(inode->i_sb, first_blkno);
 606	if (!new_bh) {
 607		ret = -ENOMEM;
 608		mlog_errno(ret);
 609		goto out_commit;
 610	}
 611	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
 612
 613	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
 614				      OCFS2_JOURNAL_ACCESS_CREATE);
 615	if (ret) {
 616		mlog_errno(ret);
 617		goto out_commit;
 618	}
 619
 620	/* Initialize ocfs2_refcount_block. */
 621	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
 622	memset(rb, 0, inode->i_sb->s_blocksize);
 623	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
 624	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 625	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
 626	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 627	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
 628	rb->rf_blkno = cpu_to_le64(first_blkno);
 629	rb->rf_count = cpu_to_le32(1);
 630	rb->rf_records.rl_count =
 631			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
 632	spin_lock(&osb->osb_lock);
 633	rb->rf_generation = osb->s_next_generation++;
 634	spin_unlock(&osb->osb_lock);
 635
 636	ocfs2_journal_dirty(handle, new_bh);
 637
 638	spin_lock(&oi->ip_lock);
 639	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 640	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 641	di->i_refcount_loc = cpu_to_le64(first_blkno);
 642	spin_unlock(&oi->ip_lock);
 643
 644	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
 645
 646	ocfs2_journal_dirty(handle, di_bh);
 647
 648	/*
  649	 * We have to init the tree lock here since its creation uses
  650	 * the generation number.
 651	 */
 652	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
 653	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
 654				      new_tree->rf_generation);
 655
 656	spin_lock(&osb->osb_lock);
 657	tree = ocfs2_find_refcount_tree(osb, first_blkno);
 658
 659	/*
 660	 * We've just created a new refcount tree in this block.  If
 661	 * we found a refcount tree on the ocfs2_super, it must be
 662	 * one we just deleted.  We free the old tree before
 663	 * inserting the new tree.
 664	 */
 665	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
 666	if (tree)
 667		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 668	ocfs2_insert_refcount_tree(osb, new_tree);
 669	spin_unlock(&osb->osb_lock);
 670	new_tree = NULL;
 671	if (tree)
 672		ocfs2_refcount_tree_put(tree);
 673
 674out_commit:
 675	ocfs2_commit_trans(osb, handle);
 676
 677out:
 678	if (new_tree) {
 679		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
 680		kfree(new_tree);
 681	}
 682
 683	brelse(new_bh);
 684	if (meta_ac)
 685		ocfs2_free_alloc_context(meta_ac);
 686
 687	return ret;
 688}
 689
 690static int ocfs2_set_refcount_tree(struct inode *inode,
 691				   struct buffer_head *di_bh,
 692				   u64 refcount_loc)
 693{
 694	int ret;
 695	handle_t *handle = NULL;
 696	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 697	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 698	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 699	struct buffer_head *ref_root_bh = NULL;
 700	struct ocfs2_refcount_block *rb;
 701	struct ocfs2_refcount_tree *ref_tree;
 702
 703	BUG_ON(ocfs2_is_refcount_inode(inode));
 704
 705	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
 706				       &ref_tree, &ref_root_bh);
 707	if (ret) {
 708		mlog_errno(ret);
 709		return ret;
 710	}
 711
 712	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
 713	if (IS_ERR(handle)) {
 714		ret = PTR_ERR(handle);
 715		mlog_errno(ret);
 716		goto out;
 717	}
 718
 719	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 720				      OCFS2_JOURNAL_ACCESS_WRITE);
 721	if (ret) {
 722		mlog_errno(ret);
 723		goto out_commit;
 724	}
 725
 726	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
 727				      OCFS2_JOURNAL_ACCESS_WRITE);
 728	if (ret) {
 729		mlog_errno(ret);
 730		goto out_commit;
 731	}
 732
 733	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 734	le32_add_cpu(&rb->rf_count, 1);
 735
 736	ocfs2_journal_dirty(handle, ref_root_bh);
 737
 738	spin_lock(&oi->ip_lock);
 739	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 740	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 741	di->i_refcount_loc = cpu_to_le64(refcount_loc);
 742	spin_unlock(&oi->ip_lock);
 743	ocfs2_journal_dirty(handle, di_bh);
 744
 745out_commit:
 746	ocfs2_commit_trans(osb, handle);
 747out:
 748	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 749	brelse(ref_root_bh);
 750
 751	return ret;
 752}
 753
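/*
 * Drop the inode's reference on its refcount tree.  If rf_count falls to
 * zero, the refcount block itself is freed back to the suballocator; note
 * that the allocator inode's i_rwsem and cluster lock are taken before the
 * transaction starts, following the usual ocfs2 lock ordering.
 */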
 754int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
 755{
 756	int ret, delete_tree = 0;
 757	handle_t *handle = NULL;
 758	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 759	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 760	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 761	struct ocfs2_refcount_block *rb;
 762	struct inode *alloc_inode = NULL;
 763	struct buffer_head *alloc_bh = NULL;
 764	struct buffer_head *blk_bh = NULL;
 765	struct ocfs2_refcount_tree *ref_tree;
 766	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
 767	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
 768	u16 bit = 0;
 769
 770	if (!ocfs2_is_refcount_inode(inode))
 771		return 0;
 772
 773	BUG_ON(!ref_blkno);
 774	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
 775	if (ret) {
 776		mlog_errno(ret);
 777		return ret;
 778	}
 779
 780	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
 781
 782	/*
 783	 * If we are the last user, we need to free the block.
  784	 * So lock the allocator ahead of time.
 785	 */
 786	if (le32_to_cpu(rb->rf_count) == 1) {
 787		blk = le64_to_cpu(rb->rf_blkno);
 788		bit = le16_to_cpu(rb->rf_suballoc_bit);
 789		if (rb->rf_suballoc_loc)
 790			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
 791		else
 792			bg_blkno = ocfs2_which_suballoc_group(blk, bit);
 793
 794		alloc_inode = ocfs2_get_system_file_inode(osb,
 795					EXTENT_ALLOC_SYSTEM_INODE,
 796					le16_to_cpu(rb->rf_suballoc_slot));
 797		if (!alloc_inode) {
 798			ret = -ENOMEM;
 799			mlog_errno(ret);
 800			goto out;
 801		}
 802		inode_lock(alloc_inode);
 803
 804		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
 805		if (ret) {
 806			mlog_errno(ret);
 807			goto out_mutex;
 808		}
 809
 810		credits += OCFS2_SUBALLOC_FREE;
 811	}
 812
 813	handle = ocfs2_start_trans(osb, credits);
 814	if (IS_ERR(handle)) {
 815		ret = PTR_ERR(handle);
 816		mlog_errno(ret);
 817		goto out_unlock;
 818	}
 819
 820	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 821				      OCFS2_JOURNAL_ACCESS_WRITE);
 822	if (ret) {
 823		mlog_errno(ret);
 824		goto out_commit;
 825	}
 826
 827	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
 828				      OCFS2_JOURNAL_ACCESS_WRITE);
 829	if (ret) {
 830		mlog_errno(ret);
 831		goto out_commit;
 832	}
 833
 834	spin_lock(&oi->ip_lock);
 835	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
 836	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 837	di->i_refcount_loc = 0;
 838	spin_unlock(&oi->ip_lock);
 839	ocfs2_journal_dirty(handle, di_bh);
 840
  841	le32_add_cpu(&rb->rf_count, -1);
 842	ocfs2_journal_dirty(handle, blk_bh);
 843
 844	if (!rb->rf_count) {
 845		delete_tree = 1;
 846		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
 847		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
 848					       alloc_bh, bit, bg_blkno, 1);
 849		if (ret)
 850			mlog_errno(ret);
 851	}
 852
 853out_commit:
 854	ocfs2_commit_trans(osb, handle);
 855out_unlock:
 856	if (alloc_inode) {
 857		ocfs2_inode_unlock(alloc_inode, 1);
 858		brelse(alloc_bh);
 859	}
 860out_mutex:
 861	if (alloc_inode) {
 862		inode_unlock(alloc_inode);
 863		iput(alloc_inode);
 864	}
 865out:
 866	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 867	if (delete_tree)
 868		ocfs2_refcount_tree_put(ref_tree);
 869	brelse(blk_bh);
 870
 871	return ret;
 872}
 873
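/*
 * Worked example for the lookup below (editor's illustration): with
 * records (r_cpos 0, 4 clusters, refcount 2) and (r_cpos 8, 4 clusters,
 * refcount 1), a search for cpos 5, len 6 misses both and fakes the hole
 * rec (r_cpos 5, 3 clusters, refcount 0), clipped at the next record;
 * a search for cpos 2 falls inside the first record and returns it.
 */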
 874static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
 875					  struct buffer_head *ref_leaf_bh,
 876					  u64 cpos, unsigned int len,
 877					  struct ocfs2_refcount_rec *ret_rec,
 878					  int *index)
 879{
 880	int i = 0;
 881	struct ocfs2_refcount_block *rb =
 882		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
 883	struct ocfs2_refcount_rec *rec = NULL;
 884
 885	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
 886		rec = &rb->rf_records.rl_recs[i];
 887
 888		if (le64_to_cpu(rec->r_cpos) +
 889		    le32_to_cpu(rec->r_clusters) <= cpos)
 890			continue;
 891		else if (le64_to_cpu(rec->r_cpos) > cpos)
 892			break;
 893
  894		/* ok, cpos falls in this rec. Just return. */
 895		if (ret_rec)
 896			*ret_rec = *rec;
 897		goto out;
 898	}
 899
 900	if (ret_rec) {
  901		/* We hit a hole here, so fake the rec. */
 902		ret_rec->r_cpos = cpu_to_le64(cpos);
 903		ret_rec->r_refcount = 0;
 904		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
 905		    le64_to_cpu(rec->r_cpos) < cpos + len)
 906			ret_rec->r_clusters =
 907				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
 908		else
 909			ret_rec->r_clusters = cpu_to_le32(len);
 910	}
 911
 912out:
 913	*index = i;
 914}
 915
 916/*
  917 * Try to remove the refcount tree. The mechanism is:
  918 * 1) Check whether i_clusters == 0; if not, exit.
  919 * 2) Check whether we have an i_xattr_loc in the dinode; if so, exit.
  920 * 3) Check whether we have an inline xattr value stored outside; if so, exit.
  921 * 4) Remove the tree.
 922 */
 923int ocfs2_try_remove_refcount_tree(struct inode *inode,
 924				   struct buffer_head *di_bh)
 925{
 926	int ret;
 927	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 928	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 929
 930	down_write(&oi->ip_xattr_sem);
 931	down_write(&oi->ip_alloc_sem);
 932
 933	if (oi->ip_clusters)
 934		goto out;
 935
 936	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
 937		goto out;
 938
 939	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
 940	    ocfs2_has_inline_xattr_value_outside(inode, di))
 941		goto out;
 942
 943	ret = ocfs2_remove_refcount_tree(inode, di_bh);
 944	if (ret)
 945		mlog_errno(ret);
 946out:
 947	up_write(&oi->ip_alloc_sem);
 948	up_write(&oi->ip_xattr_sem);
 949	return 0;
 950}
 951
 952/*
 953 * Find the end range for a leaf refcount block indicated by
 954 * el->l_recs[index].e_blkno.
 955 */
 956static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
 957				       struct buffer_head *ref_root_bh,
 958				       struct ocfs2_extent_block *eb,
 959				       struct ocfs2_extent_list *el,
 960				       int index,  u32 *cpos_end)
 961{
 962	int ret, i, subtree_root;
 963	u32 cpos;
 964	u64 blkno;
 965	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
 966	struct ocfs2_path *left_path = NULL, *right_path = NULL;
 967	struct ocfs2_extent_tree et;
 968	struct ocfs2_extent_list *tmp_el;
 969
 970	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
 971		/*
  972		 * We have an extent rec after index, so just use the e_cpos
 973		 * of the next extent rec.
 974		 */
 975		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
 976		return 0;
 977	}
 978
 979	if (!eb || (eb && !eb->h_next_leaf_blk)) {
 980		/*
 981		 * We are the last extent rec, so any high cpos should
 982		 * be stored in this leaf refcount block.
 983		 */
 984		*cpos_end = UINT_MAX;
 985		return 0;
 986	}
 987
 988	/*
 989	 * If the extent block isn't the last one, we have to find
 990	 * the subtree root between this extent block and the next
 991	 * leaf extent block and get the corresponding e_cpos from
  992	 * the subtree root. Otherwise we may corrupt the b-tree.
 993	 */
 994	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
 995
 996	left_path = ocfs2_new_path_from_et(&et);
 997	if (!left_path) {
 998		ret = -ENOMEM;
 999		mlog_errno(ret);
1000		goto out;
1001	}
1002
1003	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
1004	ret = ocfs2_find_path(ci, left_path, cpos);
1005	if (ret) {
1006		mlog_errno(ret);
1007		goto out;
1008	}
1009
1010	right_path = ocfs2_new_path_from_path(left_path);
1011	if (!right_path) {
1012		ret = -ENOMEM;
1013		mlog_errno(ret);
1014		goto out;
1015	}
1016
1017	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
1018	if (ret) {
1019		mlog_errno(ret);
1020		goto out;
1021	}
1022
1023	ret = ocfs2_find_path(ci, right_path, cpos);
1024	if (ret) {
1025		mlog_errno(ret);
1026		goto out;
1027	}
1028
1029	subtree_root = ocfs2_find_subtree_root(&et, left_path,
1030					       right_path);
1031
1032	tmp_el = left_path->p_node[subtree_root].el;
1033	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
1034	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
1035		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
1036			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
1037			break;
1038		}
1039	}
1040
1041	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
1042
1043out:
1044	ocfs2_free_path(left_path);
1045	ocfs2_free_path(right_path);
1046	return ret;
1047}
1048
1049/*
1050 * Given a cpos and len, try to find the refcount record which contains cpos.
1051 * 1. If cpos can be found in one refcount record, return the record.
 1052 * 2. If cpos can't be found, return a fake record which starts from cpos
 1053 *    and ends at the smaller of cpos+len and the start of the next record.
1054 *    This fake record has r_refcount = 0.
1055 */
1056static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
1057				  struct buffer_head *ref_root_bh,
1058				  u64 cpos, unsigned int len,
1059				  struct ocfs2_refcount_rec *ret_rec,
1060				  int *index,
1061				  struct buffer_head **ret_bh)
1062{
1063	int ret = 0, i, found;
1064	u32 low_cpos, uninitialized_var(cpos_end);
1065	struct ocfs2_extent_list *el;
1066	struct ocfs2_extent_rec *rec = NULL;
1067	struct ocfs2_extent_block *eb = NULL;
1068	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
1069	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1070	struct ocfs2_refcount_block *rb =
1071			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1072
1073	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
1074		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
1075					      ret_rec, index);
1076		*ret_bh = ref_root_bh;
1077		get_bh(ref_root_bh);
1078		return 0;
1079	}
1080
1081	el = &rb->rf_list;
1082	low_cpos = cpos & OCFS2_32BIT_POS_MASK;
1083
1084	if (el->l_tree_depth) {
1085		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
1086		if (ret) {
1087			mlog_errno(ret);
1088			goto out;
1089		}
1090
1091		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
1092		el = &eb->h_list;
1093
1094		if (el->l_tree_depth) {
1095			ret = ocfs2_error(sb,
 1096					  "refcount tree %llu has non-zero tree depth in leaf btree block %llu\n",
1097					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
 1098					  (unsigned long long)eb_bh->b_blocknr);
 1099			goto out;
1100		}
1101	}
1102
1103	found = 0;
1104	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1105		rec = &el->l_recs[i];
1106
1107		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
1108			found = 1;
1109			break;
1110		}
1111	}
1112
1113	if (found) {
1114		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
1115						  eb, el, i, &cpos_end);
1116		if (ret) {
1117			mlog_errno(ret);
1118			goto out;
1119		}
1120
1121		if (cpos_end < low_cpos + len)
1122			len = cpos_end - low_cpos;
1123	}
1124
1125	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
1126					&ref_leaf_bh);
1127	if (ret) {
1128		mlog_errno(ret);
1129		goto out;
1130	}
1131
1132	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1133				      ret_rec, index);
1134	*ret_bh = ref_leaf_bh;
1135out:
1136	brelse(eb_bh);
1137	return ret;
1138}
1139
1140enum ocfs2_ref_rec_contig {
1141	REF_CONTIG_NONE = 0,
1142	REF_CONTIG_LEFT,
1143	REF_CONTIG_RIGHT,
1144	REF_CONTIG_LEFTRIGHT,
1145};
1146
1147static enum ocfs2_ref_rec_contig
1148	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
1149				    int index)
1150{
1151	if ((rb->rf_records.rl_recs[index].r_refcount ==
1152	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
1153	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
1154	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
1155	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
1156		return REF_CONTIG_RIGHT;
1157
1158	return REF_CONTIG_NONE;
1159}
1160
1161static enum ocfs2_ref_rec_contig
1162	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
1163				  int index)
1164{
1165	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
1166
1167	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
1168		ret = ocfs2_refcount_rec_adjacent(rb, index);
1169
1170	if (index > 0) {
1171		enum ocfs2_ref_rec_contig tmp;
1172
1173		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
1174
1175		if (tmp == REF_CONTIG_RIGHT) {
1176			if (ret == REF_CONTIG_RIGHT)
1177				ret = REF_CONTIG_LEFTRIGHT;
1178			else
1179				ret = REF_CONTIG_LEFT;
1180		}
1181	}
1182
1183	return ret;
1184}
1185
1186static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
1187					   int index)
1188{
1189	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
1190	       rb->rf_records.rl_recs[index+1].r_refcount);
1191
1192	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
1193		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
1194
1195	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
1196		memmove(&rb->rf_records.rl_recs[index + 1],
1197			&rb->rf_records.rl_recs[index + 2],
1198			sizeof(struct ocfs2_refcount_rec) *
1199			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
1200
1201	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
1202	       0, sizeof(struct ocfs2_refcount_rec));
1203	le16_add_cpu(&rb->rf_records.rl_used, -1);
1204}
1205
1206/*
1207 * Merge the refcount rec if we are contiguous with the adjacent recs.
1208 */
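/*
 * For example (editor's illustration): with records (0, 2, refcount 1),
 * (2, 3, refcount 1), (5, 1, refcount 1) and index 1, both neighbours are
 * contiguous (REF_CONTIG_LEFTRIGHT), so two left rotations collapse the
 * three records into (0, 6, refcount 1).
 */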
1209static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
1210				     int index)
1211{
1212	enum ocfs2_ref_rec_contig contig =
1213				ocfs2_refcount_rec_contig(rb, index);
1214
1215	if (contig == REF_CONTIG_NONE)
1216		return;
1217
1218	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
1219		BUG_ON(index == 0);
1220		index--;
1221	}
1222
1223	ocfs2_rotate_refcount_rec_left(rb, index);
1224
1225	if (contig == REF_CONTIG_LEFTRIGHT)
1226		ocfs2_rotate_refcount_rec_left(rb, index);
1227}
1228
1229/*
1230 * Change the refcount indexed by "index" in ref_bh.
1231 * If refcount reaches 0, remove it.
1232 */
1233static int ocfs2_change_refcount_rec(handle_t *handle,
1234				     struct ocfs2_caching_info *ci,
1235				     struct buffer_head *ref_leaf_bh,
1236				     int index, int merge, int change)
1237{
1238	int ret;
1239	struct ocfs2_refcount_block *rb =
1240			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1241	struct ocfs2_refcount_list *rl = &rb->rf_records;
1242	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
1243
1244	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1245				      OCFS2_JOURNAL_ACCESS_WRITE);
1246	if (ret) {
1247		mlog_errno(ret);
1248		goto out;
1249	}
1250
1251	trace_ocfs2_change_refcount_rec(
1252		(unsigned long long)ocfs2_metadata_cache_owner(ci),
1253		index, le32_to_cpu(rec->r_refcount), change);
1254	le32_add_cpu(&rec->r_refcount, change);
1255
1256	if (!rec->r_refcount) {
1257		if (index != le16_to_cpu(rl->rl_used) - 1) {
1258			memmove(rec, rec + 1,
1259				(le16_to_cpu(rl->rl_used) - index - 1) *
1260				sizeof(struct ocfs2_refcount_rec));
1261			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
1262			       0, sizeof(struct ocfs2_refcount_rec));
1263		}
1264
1265		le16_add_cpu(&rl->rl_used, -1);
1266	} else if (merge)
1267		ocfs2_refcount_rec_merge(rb, index);
1268
1269	ocfs2_journal_dirty(handle, ref_leaf_bh);
1270out:
1271	return ret;
1272}
1273
1274static int ocfs2_expand_inline_ref_root(handle_t *handle,
1275					struct ocfs2_caching_info *ci,
1276					struct buffer_head *ref_root_bh,
1277					struct buffer_head **ref_leaf_bh,
1278					struct ocfs2_alloc_context *meta_ac)
1279{
1280	int ret;
1281	u16 suballoc_bit_start;
1282	u32 num_got;
1283	u64 suballoc_loc, blkno;
1284	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1285	struct buffer_head *new_bh = NULL;
1286	struct ocfs2_refcount_block *new_rb;
1287	struct ocfs2_refcount_block *root_rb =
1288			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1289
1290	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1291				      OCFS2_JOURNAL_ACCESS_WRITE);
1292	if (ret) {
1293		mlog_errno(ret);
1294		goto out;
1295	}
1296
1297	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1298				   &suballoc_bit_start, &num_got,
1299				   &blkno);
1300	if (ret) {
1301		mlog_errno(ret);
1302		goto out;
1303	}
1304
1305	new_bh = sb_getblk(sb, blkno);
1306	if (new_bh == NULL) {
1307		ret = -ENOMEM;
1308		mlog_errno(ret);
1309		goto out;
1310	}
1311	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1312
1313	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1314				      OCFS2_JOURNAL_ACCESS_CREATE);
1315	if (ret) {
1316		mlog_errno(ret);
1317		goto out;
1318	}
1319
1320	/*
1321	 * Initialize ocfs2_refcount_block.
 1322	 * It should contain the same information as the old root,
 1323	 * so just memcpy it and change the corresponding fields.
1324	 */
1325	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
1326
1327	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1328	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1329	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1330	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1331	new_rb->rf_blkno = cpu_to_le64(blkno);
1332	new_rb->rf_cpos = cpu_to_le32(0);
1333	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1334	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1335	ocfs2_journal_dirty(handle, new_bh);
1336
1337	/* Now change the root. */
1338	memset(&root_rb->rf_list, 0, sb->s_blocksize -
1339	       offsetof(struct ocfs2_refcount_block, rf_list));
1340	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
1341	root_rb->rf_clusters = cpu_to_le32(1);
1342	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
1343	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
1344	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
1345	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
1346
1347	ocfs2_journal_dirty(handle, ref_root_bh);
1348
1349	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
1350		le16_to_cpu(new_rb->rf_records.rl_used));
1351
1352	*ref_leaf_bh = new_bh;
1353	new_bh = NULL;
1354out:
1355	brelse(new_bh);
1356	return ret;
1357}
1358
1359static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
1360					   struct ocfs2_refcount_rec *next)
1361{
1362	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
1363		ocfs2_get_ref_rec_low_cpos(next))
1364		return 1;
1365
1366	return 0;
1367}
1368
1369static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
1370{
1371	const struct ocfs2_refcount_rec *l = a, *r = b;
1372	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
1373	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
1374
1375	if (l_cpos > r_cpos)
1376		return 1;
1377	if (l_cpos < r_cpos)
1378		return -1;
1379	return 0;
1380}
1381
1382static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
1383{
1384	const struct ocfs2_refcount_rec *l = a, *r = b;
1385	u64 l_cpos = le64_to_cpu(l->r_cpos);
1386	u64 r_cpos = le64_to_cpu(r->r_cpos);
1387
1388	if (l_cpos > r_cpos)
1389		return 1;
1390	if (l_cpos < r_cpos)
1391		return -1;
1392	return 0;
1393}
1394
1395static void swap_refcount_rec(void *a, void *b, int size)
1396{
1397	struct ocfs2_refcount_rec *l = a, *r = b;
1398
 1399	swap(*l, *r);
 1400}
1401
1402/*
 1403 * The refcount recs are ordered by their 64-bit cpos,
 1404 * but we will use the low 32 bits as the e_cpos in the b-tree,
 1405 * so we need to make sure that the positions don't intersect.
 1406 *
 1407 * Note: The refcount recs are already sorted by their low 32-bit cpos,
 1408 *       so just try the middle pos first; we will exit as soon as we
 1409 *       find a good position.
1410 */
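/*
 * For example (editor's illustration): with rl_used = 6 the loop below
 * tests the record boundaries in the order 2|3, 3|4, 1|2, 4|5, 0|1,
 * preferring a split as close to the middle as possible, and returns
 * -ENOSPC only if every adjacent pair overlaps in low-32-bit cpos space.
 */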
1411static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
1412					 u32 *split_pos, int *split_index)
1413{
1414	int num_used = le16_to_cpu(rl->rl_used);
1415	int delta, middle = num_used / 2;
1416
1417	for (delta = 0; delta < middle; delta++) {
1418		/* Let's check delta earlier than middle */
1419		if (ocfs2_refcount_rec_no_intersect(
1420					&rl->rl_recs[middle - delta - 1],
1421					&rl->rl_recs[middle - delta])) {
1422			*split_index = middle - delta;
1423			break;
1424		}
1425
1426		/* For even counts, don't walk off the end */
1427		if ((middle + delta + 1) == num_used)
1428			continue;
1429
1430		/* Now try delta past middle */
1431		if (ocfs2_refcount_rec_no_intersect(
1432					&rl->rl_recs[middle + delta],
1433					&rl->rl_recs[middle + delta + 1])) {
1434			*split_index = middle + delta + 1;
1435			break;
1436		}
1437	}
1438
1439	if (delta >= middle)
1440		return -ENOSPC;
1441
1442	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
1443	return 0;
1444}
1445
1446static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1447					    struct buffer_head *new_bh,
1448					    u32 *split_cpos)
1449{
1450	int split_index = 0, num_moved, ret;
1451	u32 cpos = 0;
1452	struct ocfs2_refcount_block *rb =
1453			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1454	struct ocfs2_refcount_list *rl = &rb->rf_records;
1455	struct ocfs2_refcount_block *new_rb =
1456			(struct ocfs2_refcount_block *)new_bh->b_data;
1457	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1458
1459	trace_ocfs2_divide_leaf_refcount_block(
1460		(unsigned long long)ref_leaf_bh->b_blocknr,
1461		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
1462
1463	/*
1464	 * XXX: Improvement later.
 1465	 * If we know all the high 32 bits of cpos are the same, no need to sort.
1466	 *
1467	 * In order to make the whole process safe, we do:
1468	 * 1. sort the entries by their low 32 bit cpos first so that we can
1469	 *    find the split cpos easily.
1470	 * 2. call ocfs2_insert_extent to insert the new refcount block.
1471	 * 3. move the refcount rec to the new block.
1472	 * 4. sort the entries by their 64 bit cpos.
1473	 * 5. dirty the new_rb and rb.
1474	 */
1475	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1476	     sizeof(struct ocfs2_refcount_rec),
1477	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
1478
1479	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1480	if (ret) {
1481		mlog_errno(ret);
1482		return ret;
1483	}
1484
1485	new_rb->rf_cpos = cpu_to_le32(cpos);
1486
1487	/* move refcount records starting from split_index to the new block. */
1488	num_moved = le16_to_cpu(rl->rl_used) - split_index;
1489	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
1490	       num_moved * sizeof(struct ocfs2_refcount_rec));
1491
 1492	/* OK, remove the entries we just moved over to the other block. */
1493	memset(&rl->rl_recs[split_index], 0,
1494	       num_moved * sizeof(struct ocfs2_refcount_rec));
1495
1496	/* change old and new rl_used accordingly. */
1497	le16_add_cpu(&rl->rl_used, -num_moved);
1498	new_rl->rl_used = cpu_to_le16(num_moved);
1499
1500	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1501	     sizeof(struct ocfs2_refcount_rec),
1502	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1503
1504	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
1505	     sizeof(struct ocfs2_refcount_rec),
1506	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1507
1508	*split_cpos = cpos;
1509	return 0;
1510}
1511
1512static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1513					 struct ocfs2_caching_info *ci,
1514					 struct buffer_head *ref_root_bh,
1515					 struct buffer_head *ref_leaf_bh,
1516					 struct ocfs2_alloc_context *meta_ac)
1517{
1518	int ret;
1519	u16 suballoc_bit_start;
1520	u32 num_got, new_cpos;
1521	u64 suballoc_loc, blkno;
1522	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1523	struct ocfs2_refcount_block *root_rb =
1524			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1525	struct buffer_head *new_bh = NULL;
1526	struct ocfs2_refcount_block *new_rb;
1527	struct ocfs2_extent_tree ref_et;
1528
1529	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
1530
1531	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1532				      OCFS2_JOURNAL_ACCESS_WRITE);
1533	if (ret) {
1534		mlog_errno(ret);
1535		goto out;
1536	}
1537
1538	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1539				      OCFS2_JOURNAL_ACCESS_WRITE);
1540	if (ret) {
1541		mlog_errno(ret);
1542		goto out;
1543	}
1544
1545	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1546				   &suballoc_bit_start, &num_got,
1547				   &blkno);
1548	if (ret) {
1549		mlog_errno(ret);
1550		goto out;
1551	}
1552
1553	new_bh = sb_getblk(sb, blkno);
1554	if (new_bh == NULL) {
1555		ret = -ENOMEM;
1556		mlog_errno(ret);
1557		goto out;
1558	}
1559	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1560
1561	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1562				      OCFS2_JOURNAL_ACCESS_CREATE);
1563	if (ret) {
1564		mlog_errno(ret);
1565		goto out;
1566	}
1567
1568	/* Initialize ocfs2_refcount_block. */
1569	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1570	memset(new_rb, 0, sb->s_blocksize);
1571	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
1572	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1573	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1574	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1575	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
1576	new_rb->rf_blkno = cpu_to_le64(blkno);
1577	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1578	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1579	new_rb->rf_records.rl_count =
1580				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
1581	new_rb->rf_generation = root_rb->rf_generation;
1582
1583	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
1584	if (ret) {
1585		mlog_errno(ret);
1586		goto out;
1587	}
1588
1589	ocfs2_journal_dirty(handle, ref_leaf_bh);
1590	ocfs2_journal_dirty(handle, new_bh);
1591
1592	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1593
1594	trace_ocfs2_new_leaf_refcount_block(
1595			(unsigned long long)new_bh->b_blocknr, new_cpos);
1596
1597	/* Insert the new leaf block with the specific offset cpos. */
1598	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1599				  1, 0, meta_ac);
1600	if (ret)
1601		mlog_errno(ret);
1602
1603out:
1604	brelse(new_bh);
1605	return ret;
1606}
1607
1608static int ocfs2_expand_refcount_tree(handle_t *handle,
1609				      struct ocfs2_caching_info *ci,
1610				      struct buffer_head *ref_root_bh,
1611				      struct buffer_head *ref_leaf_bh,
1612				      struct ocfs2_alloc_context *meta_ac)
1613{
1614	int ret;
1615	struct buffer_head *expand_bh = NULL;
1616
1617	if (ref_root_bh == ref_leaf_bh) {
1618		/*
1619		 * the old root bh hasn't been expanded to a b-tree,
1620		 * so expand it first.
1621		 */
1622		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
1623						   &expand_bh, meta_ac);
1624		if (ret) {
1625			mlog_errno(ret);
1626			goto out;
1627		}
1628	} else {
1629		expand_bh = ref_leaf_bh;
1630		get_bh(expand_bh);
1631	}
1632
1633
 1634	/* Now add a new refcount block into the tree. */
1635	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
1636					    expand_bh, meta_ac);
1637	if (ret)
1638		mlog_errno(ret);
1639out:
1640	brelse(expand_bh);
1641	return ret;
1642}
1643
1644/*
1645 * Adjust the extent rec in b-tree representing ref_leaf_bh.
1646 *
1647 * Only called when we have inserted a new refcount rec at index 0
1648 * which means ocfs2_extent_rec.e_cpos may need some change.
 1649 * which means ocfs2_extent_rec.e_cpos may need to change.
1650static int ocfs2_adjust_refcount_rec(handle_t *handle,
1651				     struct ocfs2_caching_info *ci,
1652				     struct buffer_head *ref_root_bh,
1653				     struct buffer_head *ref_leaf_bh,
1654				     struct ocfs2_refcount_rec *rec)
1655{
1656	int ret = 0, i;
1657	u32 new_cpos, old_cpos;
1658	struct ocfs2_path *path = NULL;
1659	struct ocfs2_extent_tree et;
1660	struct ocfs2_refcount_block *rb =
1661		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1662	struct ocfs2_extent_list *el;
1663
1664	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
1665		goto out;
1666
1667	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1668	old_cpos = le32_to_cpu(rb->rf_cpos);
1669	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
1670	if (old_cpos <= new_cpos)
1671		goto out;
1672
1673	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1674
1675	path = ocfs2_new_path_from_et(&et);
1676	if (!path) {
1677		ret = -ENOMEM;
1678		mlog_errno(ret);
1679		goto out;
1680	}
1681
1682	ret = ocfs2_find_path(ci, path, old_cpos);
1683	if (ret) {
1684		mlog_errno(ret);
1685		goto out;
1686	}
1687
1688	/*
1689	 * 2 more credits, one for the leaf refcount block, one for
 1690	 * the extent block that contains the extent rec.
1691	 */
1692	ret = ocfs2_extend_trans(handle, 2);
1693	if (ret < 0) {
1694		mlog_errno(ret);
1695		goto out;
1696	}
1697
1698	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1699				      OCFS2_JOURNAL_ACCESS_WRITE);
1700	if (ret < 0) {
1701		mlog_errno(ret);
1702		goto out;
1703	}
1704
1705	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
1706				      OCFS2_JOURNAL_ACCESS_WRITE);
1707	if (ret < 0) {
1708		mlog_errno(ret);
1709		goto out;
1710	}
1711
1712	/* change the leaf extent block first. */
1713	el = path_leaf_el(path);
1714
1715	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
1716		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
1717			break;
1718
1719	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
1720
1721	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
1722
1723	/* change the r_cpos in the leaf block. */
1724	rb->rf_cpos = cpu_to_le32(new_cpos);
1725
1726	ocfs2_journal_dirty(handle, path_leaf_bh(path));
1727	ocfs2_journal_dirty(handle, ref_leaf_bh);
1728
1729out:
1730	ocfs2_free_path(path);
1731	return ret;
1732}
1733
1734static int ocfs2_insert_refcount_rec(handle_t *handle,
1735				     struct ocfs2_caching_info *ci,
1736				     struct buffer_head *ref_root_bh,
1737				     struct buffer_head *ref_leaf_bh,
1738				     struct ocfs2_refcount_rec *rec,
1739				     int index, int merge,
1740				     struct ocfs2_alloc_context *meta_ac)
1741{
1742	int ret;
1743	struct ocfs2_refcount_block *rb =
1744			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1745	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1746	struct buffer_head *new_bh = NULL;
1747
1748	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1749
1750	if (rf_list->rl_used == rf_list->rl_count) {
1751		u64 cpos = le64_to_cpu(rec->r_cpos);
1752		u32 len = le32_to_cpu(rec->r_clusters);
1753
1754		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1755						 ref_leaf_bh, meta_ac);
1756		if (ret) {
1757			mlog_errno(ret);
1758			goto out;
1759		}
1760
1761		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1762					     cpos, len, NULL, &index,
1763					     &new_bh);
1764		if (ret) {
1765			mlog_errno(ret);
1766			goto out;
1767		}
1768
1769		ref_leaf_bh = new_bh;
1770		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1771		rf_list = &rb->rf_records;
1772	}
1773
1774	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1775				      OCFS2_JOURNAL_ACCESS_WRITE);
1776	if (ret) {
1777		mlog_errno(ret);
1778		goto out;
1779	}
1780
1781	if (index < le16_to_cpu(rf_list->rl_used))
1782		memmove(&rf_list->rl_recs[index + 1],
1783			&rf_list->rl_recs[index],
1784			(le16_to_cpu(rf_list->rl_used) - index) *
1785			 sizeof(struct ocfs2_refcount_rec));
1786
1787	trace_ocfs2_insert_refcount_rec(
1788		(unsigned long long)ref_leaf_bh->b_blocknr, index,
1789		(unsigned long long)le64_to_cpu(rec->r_cpos),
1790		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
1791
1792	rf_list->rl_recs[index] = *rec;
1793
1794	le16_add_cpu(&rf_list->rl_used, 1);
1795
1796	if (merge)
1797		ocfs2_refcount_rec_merge(rb, index);
1798
1799	ocfs2_journal_dirty(handle, ref_leaf_bh);
1800
1801	if (index == 0) {
1802		ret = ocfs2_adjust_refcount_rec(handle, ci,
1803						ref_root_bh,
1804						ref_leaf_bh, rec);
1805		if (ret)
1806			mlog_errno(ret);
1807	}
1808out:
1809	brelse(new_bh);
1810	return ret;
1811}
1812
1813/*
1814 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 1815 * This is much simpler than our b-tree code.
 1816 * split_rec is the new refcount rec we want to insert.
 1817 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
 1818 * increase a refcount or decrease one to a non-zero value).
 1819 * If split_rec->r_refcount == 0, we are punching a hole in the current
 1820 * refcount rec (in case we decrease a refcount to zero).
1821 */
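/*
 * For example (editor's illustration): splitting orig_rec (cpos 0,
 * 10 clusters, refcount 1) with split_rec (cpos 3, 4 clusters, refcount 2)
 * needs recs_need = 2 and yields (0, 3, refcount 1), (3, 4, refcount 2),
 * (7, 3, refcount 1); punching split_rec (cpos 0, 4 clusters, refcount 0)
 * instead needs no new rec and just shrinks orig_rec to (4, 6, refcount 1).
 */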
1822static int ocfs2_split_refcount_rec(handle_t *handle,
1823				    struct ocfs2_caching_info *ci,
1824				    struct buffer_head *ref_root_bh,
1825				    struct buffer_head *ref_leaf_bh,
1826				    struct ocfs2_refcount_rec *split_rec,
1827				    int index, int merge,
1828				    struct ocfs2_alloc_context *meta_ac,
1829				    struct ocfs2_cached_dealloc_ctxt *dealloc)
1830{
1831	int ret, recs_need;
1832	u32 len;
1833	struct ocfs2_refcount_block *rb =
1834			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1835	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1836	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1837	struct ocfs2_refcount_rec *tail_rec = NULL;
1838	struct buffer_head *new_bh = NULL;
1839
1840	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1841
1842	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
1843		le32_to_cpu(orig_rec->r_clusters),
1844		le32_to_cpu(orig_rec->r_refcount),
1845		le64_to_cpu(split_rec->r_cpos),
1846		le32_to_cpu(split_rec->r_clusters),
1847		le32_to_cpu(split_rec->r_refcount));
1848
1849	/*
1850	 * If we just need to split the header or tail clusters,
 1851	 * no more recs are needed; a simple split is OK.
 1852	 * Otherwise we need at least one new rec.
1853	 */
1854	if (!split_rec->r_refcount &&
1855	    (split_rec->r_cpos == orig_rec->r_cpos ||
1856	     le64_to_cpu(split_rec->r_cpos) +
1857	     le32_to_cpu(split_rec->r_clusters) ==
1858	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1859		recs_need = 0;
1860	else
1861		recs_need = 1;
1862
1863	/*
 1864	 * We need one more rec if we split in the middle and the new rec has
 1865	 * a refcount in it.
1866	 */
1867	if (split_rec->r_refcount &&
1868	    (split_rec->r_cpos != orig_rec->r_cpos &&
1869	     le64_to_cpu(split_rec->r_cpos) +
1870	     le32_to_cpu(split_rec->r_clusters) !=
1871	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1872		recs_need++;
1873
 1874	/* If the leaf block doesn't have enough records, expand it. */
1875	if (le16_to_cpu(rf_list->rl_used) + recs_need >
1876					 le16_to_cpu(rf_list->rl_count)) {
1877		struct ocfs2_refcount_rec tmp_rec;
1878		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1879		len = le32_to_cpu(orig_rec->r_clusters);
1880		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1881						 ref_leaf_bh, meta_ac);
1882		if (ret) {
1883			mlog_errno(ret);
1884			goto out;
1885		}
1886
1887		/*
1888		 * We have to re-get it since cpos may now have moved to
1889		 * another leaf block.
1890		 */
1891		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1892					     cpos, len, &tmp_rec, &index,
1893					     &new_bh);
1894		if (ret) {
1895			mlog_errno(ret);
1896			goto out;
1897		}
1898
1899		ref_leaf_bh = new_bh;
1900		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1901		rf_list = &rb->rf_records;
1902		orig_rec = &rf_list->rl_recs[index];
1903	}
1904
1905	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1906				      OCFS2_JOURNAL_ACCESS_WRITE);
1907	if (ret) {
1908		mlog_errno(ret);
1909		goto out;
1910	}
1911
1912	/*
1913	 * We have calculated how many new records we need and stored
1914	 * that in recs_need, so make enough space first by moving the
1915	 * records after "index" to the end.
1916	 */
1917	if (index != le16_to_cpu(rf_list->rl_used) - 1)
1918		memmove(&rf_list->rl_recs[index + 1 + recs_need],
1919			&rf_list->rl_recs[index + 1],
1920			(le16_to_cpu(rf_list->rl_used) - index - 1) *
1921			 sizeof(struct ocfs2_refcount_rec));
1922
1923	len = (le64_to_cpu(orig_rec->r_cpos) +
1924	      le32_to_cpu(orig_rec->r_clusters)) -
1925	      (le64_to_cpu(split_rec->r_cpos) +
1926	      le32_to_cpu(split_rec->r_clusters));
1927
1928	/*
1929	 * If we have "len", then we will split at the tail and move it
1930	 * to the end of the space we have just spared.
1931	 */
1932	if (len) {
1933		tail_rec = &rf_list->rl_recs[index + recs_need];
1934
1935		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1936		le64_add_cpu(&tail_rec->r_cpos,
1937			     le32_to_cpu(tail_rec->r_clusters) - len);
1938		tail_rec->r_clusters = cpu_to_le32(len);
1939	}
1940
1941	/*
1942	 * If the split pos isn't the same as the original one, we need to
1943	 * split at the head.
1944	 *
1945	 * Note: it can happen that split_rec.r_refcount == 0,
1946	 * recs_need == 0 and len > 0, which means we just cut the head from
1947	 * orig_rec. In that case we have already modified orig_rec above,
1948	 * so comparing against its r_cpos alone would be misleading.
1949	 */
1950	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1951		len = le64_to_cpu(split_rec->r_cpos) -
1952		      le64_to_cpu(orig_rec->r_cpos);
1953		orig_rec->r_clusters = cpu_to_le32(len);
1954		index++;
1955	}
1956
1957	le16_add_cpu(&rf_list->rl_used, recs_need);
1958
1959	if (split_rec->r_refcount) {
1960		rf_list->rl_recs[index] = *split_rec;
1961		trace_ocfs2_split_refcount_rec_insert(
1962			(unsigned long long)ref_leaf_bh->b_blocknr, index,
1963			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
1964			le32_to_cpu(split_rec->r_clusters),
1965			le32_to_cpu(split_rec->r_refcount));
1966
1967		if (merge)
1968			ocfs2_refcount_rec_merge(rb, index);
1969	}
1970
1971	ocfs2_journal_dirty(handle, ref_leaf_bh);
1972
1973out:
1974	brelse(new_bh);
1975	return ret;
1976}
1977
1978static int __ocfs2_increase_refcount(handle_t *handle,
1979				     struct ocfs2_caching_info *ci,
1980				     struct buffer_head *ref_root_bh,
1981				     u64 cpos, u32 len, int merge,
1982				     struct ocfs2_alloc_context *meta_ac,
1983				     struct ocfs2_cached_dealloc_ctxt *dealloc)
1984{
1985	int ret = 0, index;
1986	struct buffer_head *ref_leaf_bh = NULL;
1987	struct ocfs2_refcount_rec rec;
1988	unsigned int set_len = 0;
1989
1990	trace_ocfs2_increase_refcount_begin(
1991	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
1992	     (unsigned long long)cpos, len);
1993
1994	while (len) {
1995		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1996					     cpos, len, &rec, &index,
1997					     &ref_leaf_bh);
1998		if (ret) {
1999			mlog_errno(ret);
2000			goto out;
2001		}
2002
2003		set_len = le32_to_cpu(rec.r_clusters);
2004
2005		/*
2006		 * Here we may run into 3 situations:
2007		 *
2008		 * 1. If we find an already existing record, and the length
2009		 *    is the same, cool, we just need to increase the r_refcount
2010		 *    and it is OK.
2011		 * 2. If we find a hole, just insert it with r_refcount = 1.
2012		 * 3. If we are in the middle of one extent record, split
2013		 *    it.
2014		 */
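		/*
		 * A sketch with numbers: given an existing rec
		 * [100, 110) at refcount 1, increasing [100, 110)
		 * is case 1, increasing the hole [110, 120) is
		 * case 2, and increasing only [105, 110) is case 3
		 * and goes through ocfs2_split_refcount_rec().
		 */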
2015		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2016		    set_len <= len) {
2017			trace_ocfs2_increase_refcount_change(
2018				(unsigned long long)cpos, set_len,
2019				le32_to_cpu(rec.r_refcount));
2020			ret = ocfs2_change_refcount_rec(handle, ci,
2021							ref_leaf_bh, index,
2022							merge, 1);
2023			if (ret) {
2024				mlog_errno(ret);
2025				goto out;
2026			}
2027		} else if (!rec.r_refcount) {
2028			rec.r_refcount = cpu_to_le32(1);
2029
2030			trace_ocfs2_increase_refcount_insert(
2031			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2032			     set_len);
2033			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
2034							ref_leaf_bh,
2035							&rec, index,
2036							merge, meta_ac);
2037			if (ret) {
2038				mlog_errno(ret);
2039				goto out;
2040			}
2041		} else  {
2042			set_len = min((u64)(cpos + len),
2043				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
2044			rec.r_cpos = cpu_to_le64(cpos);
2045			rec.r_clusters = cpu_to_le32(set_len);
2046			le32_add_cpu(&rec.r_refcount, 1);
2047
2048			trace_ocfs2_increase_refcount_split(
2049			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2050			     set_len, le32_to_cpu(rec.r_refcount));
2051			ret = ocfs2_split_refcount_rec(handle, ci,
2052						       ref_root_bh, ref_leaf_bh,
2053						       &rec, index, merge,
2054						       meta_ac, dealloc);
2055			if (ret) {
2056				mlog_errno(ret);
2057				goto out;
2058			}
2059		}
2060
2061		cpos += set_len;
2062		len -= set_len;
2063		brelse(ref_leaf_bh);
2064		ref_leaf_bh = NULL;
2065	}
2066
2067out:
2068	brelse(ref_leaf_bh);
2069	return ret;
2070}
2071
2072static int ocfs2_remove_refcount_extent(handle_t *handle,
2073				struct ocfs2_caching_info *ci,
2074				struct buffer_head *ref_root_bh,
2075				struct buffer_head *ref_leaf_bh,
2076				struct ocfs2_alloc_context *meta_ac,
2077				struct ocfs2_cached_dealloc_ctxt *dealloc)
2078{
2079	int ret;
2080	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2081	struct ocfs2_refcount_block *rb =
2082			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2083	struct ocfs2_extent_tree et;
2084
2085	BUG_ON(rb->rf_records.rl_used);
2086
2087	trace_ocfs2_remove_refcount_extent(
2088		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2089		(unsigned long long)ref_leaf_bh->b_blocknr,
2090		le32_to_cpu(rb->rf_cpos));
2091
2092	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2093	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
2094				  1, meta_ac, dealloc);
2095	if (ret) {
2096		mlog_errno(ret);
2097		goto out;
2098	}
2099
2100	ocfs2_remove_from_cache(ci, ref_leaf_bh);
2101
2102	/*
2103	 * Add the freed block to the dealloc context so that it will be
2104	 * freed when we run the deallocs.
2105	 */
2106	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
2107					le16_to_cpu(rb->rf_suballoc_slot),
2108					le64_to_cpu(rb->rf_suballoc_loc),
2109					le64_to_cpu(rb->rf_blkno),
2110					le16_to_cpu(rb->rf_suballoc_bit));
2111	if (ret) {
2112		mlog_errno(ret);
2113		goto out;
2114	}
2115
2116	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
2117				      OCFS2_JOURNAL_ACCESS_WRITE);
2118	if (ret) {
2119		mlog_errno(ret);
2120		goto out;
2121	}
2122
2123	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2124
2125	le32_add_cpu(&rb->rf_clusters, -1);
2126
2127	/*
2128	 * Check whether we need to restore the root refcount block if
2129	 * there is no leaf extent block at all.
2130	 */
2131	if (!rb->rf_list.l_next_free_rec) {
2132		BUG_ON(rb->rf_clusters);
2133
2134		trace_ocfs2_restore_refcount_block(
2135		     (unsigned long long)ref_root_bh->b_blocknr);
2136
2137		rb->rf_flags = 0;
2138		rb->rf_parent = 0;
2139		rb->rf_cpos = 0;
2140		memset(&rb->rf_records, 0, sb->s_blocksize -
2141		       offsetof(struct ocfs2_refcount_block, rf_records));
2142		rb->rf_records.rl_count =
2143				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2144	}
2145
2146	ocfs2_journal_dirty(handle, ref_root_bh);
2147
2148out:
2149	return ret;
2150}
2151
2152int ocfs2_increase_refcount(handle_t *handle,
2153			    struct ocfs2_caching_info *ci,
2154			    struct buffer_head *ref_root_bh,
2155			    u64 cpos, u32 len,
2156			    struct ocfs2_alloc_context *meta_ac,
2157			    struct ocfs2_cached_dealloc_ctxt *dealloc)
2158{
2159	return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
2160					 cpos, len, 1,
2161					 meta_ac, dealloc);
2162}
2163
2164static int ocfs2_decrease_refcount_rec(handle_t *handle,
2165				struct ocfs2_caching_info *ci,
2166				struct buffer_head *ref_root_bh,
2167				struct buffer_head *ref_leaf_bh,
2168				int index, u64 cpos, unsigned int len,
2169				struct ocfs2_alloc_context *meta_ac,
2170				struct ocfs2_cached_dealloc_ctxt *dealloc)
2171{
2172	int ret;
2173	struct ocfs2_refcount_block *rb =
2174			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2175	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2176
2177	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2178	BUG_ON(cpos + len >
2179	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2180
2181	trace_ocfs2_decrease_refcount_rec(
2182		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2183		(unsigned long long)cpos, len);
2184
2185	if (cpos == le64_to_cpu(rec->r_cpos) &&
2186	    len == le32_to_cpu(rec->r_clusters))
2187		ret = ocfs2_change_refcount_rec(handle, ci,
2188						ref_leaf_bh, index, 1, -1);
2189	else {
2190		struct ocfs2_refcount_rec split = *rec;
2191		split.r_cpos = cpu_to_le64(cpos);
2192		split.r_clusters = cpu_to_le32(len);
2193
2194		le32_add_cpu(&split.r_refcount, -1);
2195
2196		ret = ocfs2_split_refcount_rec(handle, ci,
2197					       ref_root_bh, ref_leaf_bh,
2198					       &split, index, 1,
2199					       meta_ac, dealloc);
2200	}
2201
2202	if (ret) {
2203		mlog_errno(ret);
2204		goto out;
2205	}
2206
2207	/* Remove the leaf refcount block if it contains no refcount record. */
2208	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2209		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2210						   ref_leaf_bh, meta_ac,
2211						   dealloc);
2212		if (ret)
2213			mlog_errno(ret);
2214	}
2215
2216out:
2217	return ret;
2218}
2219
2220static int __ocfs2_decrease_refcount(handle_t *handle,
2221				     struct ocfs2_caching_info *ci,
2222				     struct buffer_head *ref_root_bh,
2223				     u64 cpos, u32 len,
2224				     struct ocfs2_alloc_context *meta_ac,
2225				     struct ocfs2_cached_dealloc_ctxt *dealloc,
2226				     int delete)
2227{
2228	int ret = 0, index = 0;
2229	struct ocfs2_refcount_rec rec;
2230	unsigned int r_count = 0, r_len;
2231	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2232	struct buffer_head *ref_leaf_bh = NULL;
2233
2234	trace_ocfs2_decrease_refcount(
2235		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2236		(unsigned long long)cpos, len, delete);
2237
2238	while (len) {
2239		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2240					     cpos, len, &rec, &index,
2241					     &ref_leaf_bh);
2242		if (ret) {
2243			mlog_errno(ret);
2244			goto out;
2245		}
2246
2247		r_count = le32_to_cpu(rec.r_refcount);
2248		BUG_ON(r_count == 0);
2249		if (!delete)
2250			BUG_ON(r_count > 1);
2251
2252		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2253			      le32_to_cpu(rec.r_clusters)) - cpos;
2254
2255		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2256						  ref_leaf_bh, index,
2257						  cpos, r_len,
2258						  meta_ac, dealloc);
2259		if (ret) {
2260			mlog_errno(ret);
2261			goto out;
2262		}
2263
2264		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
2265			ret = ocfs2_cache_cluster_dealloc(dealloc,
2266					  ocfs2_clusters_to_blocks(sb, cpos),
2267							  r_len);
2268			if (ret) {
2269				mlog_errno(ret);
2270				goto out;
2271			}
2272		}
2273
2274		cpos += r_len;
2275		len -= r_len;
2276		brelse(ref_leaf_bh);
2277		ref_leaf_bh = NULL;
2278	}
2279
2280out:
2281	brelse(ref_leaf_bh);
2282	return ret;
2283}
2284
2285/* Caller must hold refcount tree lock. */
2286int ocfs2_decrease_refcount(struct inode *inode,
2287			    handle_t *handle, u32 cpos, u32 len,
2288			    struct ocfs2_alloc_context *meta_ac,
2289			    struct ocfs2_cached_dealloc_ctxt *dealloc,
2290			    int delete)
2291{
2292	int ret;
2293	u64 ref_blkno;
2294	struct buffer_head *ref_root_bh = NULL;
2295	struct ocfs2_refcount_tree *tree;
2296
2297	BUG_ON(!ocfs2_is_refcount_inode(inode));
2298
2299	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2300	if (ret) {
2301		mlog_errno(ret);
2302		goto out;
2303	}
2304
2305	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2306	if (ret) {
2307		mlog_errno(ret);
2308		goto out;
2309	}
2310
2311	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2312					&ref_root_bh);
2313	if (ret) {
2314		mlog_errno(ret);
2315		goto out;
2316	}
2317
2318	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2319					cpos, len, meta_ac, dealloc, delete);
2320	if (ret)
2321		mlog_errno(ret);
2322out:
2323	brelse(ref_root_bh);
2324	return ret;
2325}
2326
2327/*
2328 * Mark the already-existing extent at cpos as refcounted for len clusters.
2329 * This adds the refcount extent flag.
2330 *
2331 * If the existing extent is larger than the request, initiate a
2332 * split. An attempt will be made at merging with adjacent extents.
2333 *
2334 * The caller is responsible for passing down meta_ac if we'll need it.
2335 */
2336static int ocfs2_mark_extent_refcounted(struct inode *inode,
2337				struct ocfs2_extent_tree *et,
2338				handle_t *handle, u32 cpos,
2339				u32 len, u32 phys,
2340				struct ocfs2_alloc_context *meta_ac,
2341				struct ocfs2_cached_dealloc_ctxt *dealloc)
2342{
2343	int ret;
2344
2345	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
2346					   cpos, len, phys);
2347
2348	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2349		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2350				  inode->i_ino);
2351		goto out;
2352	}
2353
2354	ret = ocfs2_change_extent_flag(handle, et, cpos,
2355				       len, phys, meta_ac, dealloc,
2356				       OCFS2_EXT_REFCOUNTED, 0);
2357	if (ret)
2358		mlog_errno(ret);
2359
2360out:
2361	return ret;
2362}
2363
2364/*
2365 * Given some contiguous physical clusters, calculate what we need
2366 * for modifying their refcount.
2367 */
2368static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2369					    struct ocfs2_caching_info *ci,
2370					    struct buffer_head *ref_root_bh,
2371					    u64 start_cpos,
2372					    u32 clusters,
2373					    int *meta_add,
2374					    int *credits)
2375{
2376	int ret = 0, index, ref_blocks = 0, recs_add = 0;
2377	u64 cpos = start_cpos;
2378	struct ocfs2_refcount_block *rb;
2379	struct ocfs2_refcount_rec rec;
2380	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2381	u32 len;
2382
2383	while (clusters) {
2384		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2385					     cpos, clusters, &rec,
2386					     &index, &ref_leaf_bh);
2387		if (ret) {
2388			mlog_errno(ret);
2389			goto out;
2390		}
2391
2392		if (ref_leaf_bh != prev_bh) {
2393			/*
2394			 * Now we encounter a new leaf block, so calculate
2395			 * whether we need to extend the old leaf.
2396			 */
2397			if (prev_bh) {
2398				rb = (struct ocfs2_refcount_block *)
2399							prev_bh->b_data;
2400
2401				if (le16_to_cpu(rb->rf_records.rl_used) +
2402				    recs_add >
2403				    le16_to_cpu(rb->rf_records.rl_count))
2404					ref_blocks++;
2405			}
2406
2407			recs_add = 0;
2408			*credits += 1;
2409			brelse(prev_bh);
2410			prev_bh = ref_leaf_bh;
2411			get_bh(prev_bh);
2412		}
2413
2414		trace_ocfs2_calc_refcount_meta_credits_iterate(
2415				recs_add, (unsigned long long)cpos, clusters,
2416				(unsigned long long)le64_to_cpu(rec.r_cpos),
2417				le32_to_cpu(rec.r_clusters),
2418				le32_to_cpu(rec.r_refcount), index);
2419
2420		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2421			  le32_to_cpu(rec.r_clusters)) - cpos;
2422		/*
2423		 * We count all the records which will be inserted into the
2424		 * same refcount block, so that we can tell exactly whether
2425		 * we need a new refcount block or not.
2426		 *
2427		 * If we will insert a new one, this is easy and only happens
2428		 * while adding the refcounted flag to an extent, so we don't
2429		 * have a chance of splitting. We just need one record.
2430		 *
2431		 * If the refcount rec already exists, it is a little more
2432		 * complicated. We may have to:
2433		 * 1) split at the beginning if the start pos isn't aligned;
2434		 *    we need 1 more record in this case.
2435		 * 2) split at the end if the end pos isn't aligned;
2436		 *    we need 1 more record in this case.
2437		 * 3) split in the middle because of file system fragmentation;
2438		 *    we need 2 more records in this case (we can't detect this
2439		 *    beforehand, so always assume the worst case).
2440		 */
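		/*
		 * E.g. (sketch): for an existing rec [100, 110) and a
		 * refcount change over [102, 108), both boundary
		 * checks below fire, so recs_add grows by
		 * 2 + 1 + 1 = 4 for this record.
		 */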
2441		if (rec.r_refcount) {
2442			recs_add += 2;
2443			/* Check whether we need a split at the beginning. */
2444			if (cpos == start_cpos &&
2445			    cpos != le64_to_cpu(rec.r_cpos))
2446				recs_add++;
2447
2448			/* Check whether we need a split in the end. */
2449			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2450			    le32_to_cpu(rec.r_clusters))
2451				recs_add++;
2452		} else
2453			recs_add++;
2454
2455		brelse(ref_leaf_bh);
2456		ref_leaf_bh = NULL;
2457		clusters -= len;
2458		cpos += len;
2459	}
2460
2461	if (prev_bh) {
2462		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2463
2464		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2465		    le16_to_cpu(rb->rf_records.rl_count))
2466			ref_blocks++;
2467
2468		*credits += 1;
2469	}
2470
2471	if (!ref_blocks)
2472		goto out;
2473
2474	*meta_add += ref_blocks;
2475	*credits += ref_blocks;
2476
2477	/*
2478	 * So we may need to insert ref_blocks new blocks into the tree.
2479	 * That also means we need to change the b-tree and add that number
2480	 * of records, since we never merge them.
2481	 * We need one more block for expansion in case the newly created
2482	 * leaf block is also full and needs to be split.
2483	 */
2484	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2485	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2486		struct ocfs2_extent_tree et;
2487
2488		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2489		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2490		*credits += ocfs2_calc_extend_credits(sb,
2491						      et.et_root_el);
2492	} else {
2493		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2494		*meta_add += 1;
2495	}
2496
2497out:
2498
2499	trace_ocfs2_calc_refcount_meta_credits(
2500		(unsigned long long)start_cpos, clusters,
2501		*meta_add, *credits);
2502	brelse(ref_leaf_bh);
2503	brelse(prev_bh);
2504	return ret;
2505}
2506
2507/*
2508 * For the refcount tree, we will decrease the refcount of some
2509 * contiguous clusters, so just walk through them to see how many
2510 * blocks we are going to touch and whether we need to create new blocks.
2511 *
2512 * Normally the refcount blocks storing these refcounts should be
2513 * contiguous as well, so we can get the number easily.
2514 * We will at most split 2 refcount records and add 2 more
2515 * refcount blocks, so just check it in a rough way.
2516 *
2517 * Caller must hold refcount tree lock.
2518 */
2519int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2520					  u64 refcount_loc,
2521					  u64 phys_blkno,
2522					  u32 clusters,
2523					  int *credits,
2524					  int *ref_blocks)
2525{
2526	int ret;
2527	struct buffer_head *ref_root_bh = NULL;
2528	struct ocfs2_refcount_tree *tree;
2529	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2530
2531	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2532		ret = ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
2533				  inode->i_ino);
2534		goto out;
2535	}
2536
2537	BUG_ON(!ocfs2_is_refcount_inode(inode));
2538
2539	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2540				      refcount_loc, &tree);
2541	if (ret) {
2542		mlog_errno(ret);
2543		goto out;
2544	}
2545
2546	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
2547					&ref_root_bh);
2548	if (ret) {
2549		mlog_errno(ret);
2550		goto out;
2551	}
2552
2553	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2554					       &tree->rf_ci,
2555					       ref_root_bh,
2556					       start_cpos, clusters,
2557					       ref_blocks, credits);
2558	if (ret) {
2559		mlog_errno(ret);
2560		goto out;
2561	}
2562
2563	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
2564
2565out:
2566	brelse(ref_root_bh);
2567	return ret;
2568}
2569
2570#define	MAX_CONTIG_BYTES	1048576
2571
2572static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2573{
2574	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2575}
2576
2577static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2578{
2579	return ~(ocfs2_cow_contig_clusters(sb) - 1);
2580}
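/*
 * A worked example, assuming 4KB clusters: MAX_CONTIG_BYTES is 1MB, so
 * ocfs2_cow_contig_clusters() returns 256 and ocfs2_cow_contig_mask()
 * returns ~255, i.e. CoW chunks are carved out on 256-cluster (1MB)
 * boundaries.
 */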
2581
2582/*
2583 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2584 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2585 * while still being less than or equal to it.
2586 *
2587 * The goal is to break the extent at a multiple of contig_clusters.
2588 */
2589static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2590						 unsigned int start,
2591						 unsigned int cpos)
2592{
2593	BUG_ON(start > cpos);
2594
2595	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
2596}
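/*
 * E.g. (sketch with 256-cluster chunks): start = 100 and cpos = 800
 * give 100 + ((800 - 100) & ~255) = 100 + 512 = 612, the largest
 * offset of the form start + n * 256 that is still <= cpos.
 */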
2597
2598/*
2599 * Given a cluster count of len, pad it out so that it is a multiple
2600 * of contig_clusters.
2601 */
2602static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2603						  unsigned int len)
2604{
2605	unsigned int padded =
2606		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2607		ocfs2_cow_contig_mask(sb);
2608
2609	/* Did we wrap? */
2610	if (padded < len)
2611		padded = UINT_MAX;
2612
2613	return padded;
2614}
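/*
 * E.g. (sketch with 256-cluster chunks): len = 300 pads to 512,
 * len = 256 stays 256, and a len that would wrap past UINT_MAX is
 * clamped to UINT_MAX.
 */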
2615
2616/*
2617 * Calculate the start and number of virtual clusters we need to CoW.
2618 *
2619 * cpos is the virtual start cluster position where we want to CoW in
2620 * the file and write_len is the cluster length.
2621 * max_cpos is the place where we want to stop CoW intentionally.
2622 *
2623 * Normally we start CoW from the beginning of the extent record containing
2624 * cpos. We try to break up extents on boundaries of MAX_CONTIG_BYTES so that
2625 * we get good I/O from the resulting extent tree.
2626 */
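/*
 * A worked sketch with 256-cluster chunks: a 4-cluster write at
 * cpos = 1000 into a refcounted extent [0, 2048) yields
 * cow_start = 768 and cow_len = 256, i.e. the single 256-cluster
 * chunk [768, 1024) that covers the write.
 */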
2627static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2628					   struct ocfs2_extent_list *el,
2629					   u32 cpos,
2630					   u32 write_len,
2631					   u32 max_cpos,
2632					   u32 *cow_start,
2633					   u32 *cow_len)
2634{
2635	int ret = 0;
2636	int tree_height = le16_to_cpu(el->l_tree_depth), i;
2637	struct buffer_head *eb_bh = NULL;
2638	struct ocfs2_extent_block *eb = NULL;
2639	struct ocfs2_extent_rec *rec;
2640	unsigned int want_clusters, rec_end = 0;
2641	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2642	int leaf_clusters;
2643
2644	BUG_ON(cpos + write_len > max_cpos);
2645
2646	if (tree_height > 0) {
2647		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2648		if (ret) {
2649			mlog_errno(ret);
2650			goto out;
2651		}
2652
2653		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2654		el = &eb->h_list;
2655
2656		if (el->l_tree_depth) {
2657			ret = ocfs2_error(inode->i_sb,
2658					  "Inode %lu has a non-zero tree depth in leaf block %llu\n",
2659					  inode->i_ino,
2660					  (unsigned long long)eb_bh->b_blocknr);
2661			goto out;
2662		}
2663	}
2664
2665	*cow_len = 0;
2666	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2667		rec = &el->l_recs[i];
2668
2669		if (ocfs2_is_empty_extent(rec)) {
2670			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
2671					"index %d\n", inode->i_ino, i);
2672			continue;
2673		}
2674
2675		if (le32_to_cpu(rec->e_cpos) +
2676		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2677			continue;
2678
2679		if (*cow_len == 0) {
2680			/*
2681			 * We should find a refcounted record in the
2682			 * first pass.
2683			 */
2684			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2685			*cow_start = le32_to_cpu(rec->e_cpos);
2686		}
2687
2688		/*
2689		 * If we encounter a hole, a non-refcounted record, or
2690		 * go past max_cpos, stop the search.
2691		 */
2692		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2693		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
2694		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
2695			break;
2696
2697		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2698		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2699		if (rec_end > max_cpos) {
2700			rec_end = max_cpos;
2701			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
2702		}
2703
2704		/*
2705		 * How many clusters do we actually need from
2706		 * this extent?  First we see how many we actually
2707		 * need to complete the write.  If that's smaller
2708		 * than contig_clusters, we try for contig_clusters.
2709		 */
2710		if (!*cow_len)
2711			want_clusters = write_len;
2712		else
2713			want_clusters = (cpos + write_len) -
2714				(*cow_start + *cow_len);
2715		if (want_clusters < contig_clusters)
2716			want_clusters = contig_clusters;
2717
2718		/*
2719		 * If the write does not cover the whole extent, we
2720		 * need to calculate how we're going to split the extent.
2721		 * We try to do it on contig_clusters boundaries.
2722		 *
2723		 * Any extent smaller than contig_clusters will be
2724		 * CoWed in its entirety.
2725		 */
2726		if (leaf_clusters <= contig_clusters)
2727			*cow_len += leaf_clusters;
2728		else if (*cow_len || (*cow_start == cpos)) {
2729			/*
2730			 * This extent needs to be CoW'd from its
2731			 * beginning, so all we have to do is compute
2732			 * how many clusters to grab.  We align
2733			 * want_clusters to the edge of contig_clusters
2734			 * to get better I/O.
2735			 */
2736			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2737							       want_clusters);
2738
2739			if (leaf_clusters < want_clusters)
2740				*cow_len += leaf_clusters;
2741			else
2742				*cow_len += want_clusters;
2743		} else if ((*cow_start + contig_clusters) >=
2744			   (cpos + write_len)) {
2745			/*
2746			 * Breaking off contig_clusters at the front
2747			 * of the extent will cover our write.  That's
2748			 * easy.
2749			 */
2750			*cow_len = contig_clusters;
2751		} else if ((rec_end - cpos) <= contig_clusters) {
2752			/*
2753			 * Breaking off contig_clusters at the tail of
2754			 * this extent will cover cpos.
2755			 */
2756			*cow_start = rec_end - contig_clusters;
2757			*cow_len = contig_clusters;
2758		} else if ((rec_end - cpos) <= want_clusters) {
2759			/*
2760			 * While we can't fit the entire write in this
2761			 * extent, we know that the write goes from cpos
2762			 * to the end of the extent.  Break that off.
2763			 * We try to break it at some multiple of
2764			 * contig_clusters from the front of the extent.
2765			 * Failing that (ie, cpos is within
2766			 * contig_clusters of the front), we'll CoW the
2767			 * entire extent.
2768			 */
2769			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2770							   *cow_start, cpos);
2771			*cow_len = rec_end - *cow_start;
2772		} else {
2773			/*
2774			 * Ok, the entire write lives in the middle of
2775			 * this extent.  Let's try to slice the extent up
2776			 * nicely.  Optimally, our CoW region starts at
2777			 * m*contig_clusters from the beginning of the
2778			 * extent and goes for n*contig_clusters,
2779			 * covering the entire write.
2780			 */
2781			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2782							   *cow_start, cpos);
2783
2784			want_clusters = (cpos + write_len) - *cow_start;
2785			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2786							       want_clusters);
2787			if (*cow_start + want_clusters <= rec_end)
2788				*cow_len = want_clusters;
2789			else
2790				*cow_len = rec_end - *cow_start;
2791		}
2792
2793		/* Have we covered our entire write yet? */
2794		if ((*cow_start + *cow_len) >= (cpos + write_len))
2795			break;
2796
2797		/*
2798		 * If we reach the end of the extent block and don't get enough
2799		 * clusters, continue with the next extent block if possible.
2800		 */
2801		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2802		    eb && eb->h_next_leaf_blk) {
2803			u64 next_blk = le64_to_cpu(eb->h_next_leaf_blk);
2804
2805			brelse(eb_bh);
2806			eb_bh = NULL;
2807			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2808						      next_blk, &eb_bh);
2809			if (ret) {
2810				mlog_errno(ret);
2811				goto out;
2812			}
2813
2814			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2815			el = &eb->h_list;
2816			i = -1;
2817		}
2818	}
2819
2820out:
2821	brelse(eb_bh);
2822	return ret;
2823}
2824
2825/*
2826 * Prepare meta_ac, data_ac and calculate credits when we want to add
2827 * num_clusters clusters to the data tree "et" and change the refcount of
2828 * the old clusters (starting from p_cluster) in the refcount tree.
2829 *
2830 * Note:
2831 * 1. Since we may split the old tree, we will at most need num_clusters + 2
2832 *    more new leaf records.
2833 * 2. In some cases, we may not need to reserve new clusters (e.g. reflink),
2834 *    so just pass data_ac = NULL.
2835 */
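/*
 * E.g. ocfs2_add_refcounted_extent() below passes data_ac == NULL,
 * since reflink only bumps the refcount of clusters that already
 * exist.
 */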
2836static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2837					u32 p_cluster, u32 num_clusters,
2838					struct ocfs2_extent_tree *et,
2839					struct ocfs2_caching_info *ref_ci,
2840					struct buffer_head *ref_root_bh,
2841					struct ocfs2_alloc_context **meta_ac,
2842					struct ocfs2_alloc_context **data_ac,
2843					int *credits)
2844{
2845	int ret = 0, meta_add = 0;
2846	int num_free_extents = ocfs2_num_free_extents(et);
2847
2848	if (num_free_extents < 0) {
2849		ret = num_free_extents;
2850		mlog_errno(ret);
2851		goto out;
2852	}
2853
2854	if (num_free_extents < num_clusters + 2)
2855		meta_add =
2856			ocfs2_extend_meta_needed(et->et_root_el);
2857
2858	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
2859
2860	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2861					       p_cluster, num_clusters,
2862					       &meta_add, credits);
2863	if (ret) {
2864		mlog_errno(ret);
2865		goto out;
2866	}
2867
2868	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
2869	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2870						meta_ac);
2871	if (ret) {
2872		mlog_errno(ret);
2873		goto out;
2874	}
2875
2876	if (data_ac) {
2877		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2878					     data_ac);
2879		if (ret)
2880			mlog_errno(ret);
2881	}
2882
2883out:
2884	if (ret) {
2885		if (*meta_ac) {
2886			ocfs2_free_alloc_context(*meta_ac);
2887			*meta_ac = NULL;
2888		}
2889	}
2890
2891	return ret;
2892}
2893
2894static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2895{
2896	BUG_ON(buffer_dirty(bh));
2897
2898	clear_buffer_mapped(bh);
2899
2900	return 0;
2901}
2902
2903int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2904				     struct inode *inode,
2905				     u32 cpos, u32 old_cluster,
2906				     u32 new_cluster, u32 new_len)
2907{
2908	int ret = 0, partial;
2909	struct super_block *sb = inode->i_sb;
2910	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2911	struct page *page;
2912	pgoff_t page_index;
2913	unsigned int from, to;
2914	loff_t offset, end, map_end;
2915	struct address_space *mapping = inode->i_mapping;
2916
2917	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2918					       new_cluster, new_len);
2919
2920	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2921	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
2922	/*
2923	 * We only duplicate pages until we reach the page containing
2924	 * i_size - 1. So trim 'end' to i_size.
2925	 */
2926	if (end > i_size_read(inode))
2927		end = i_size_read(inode);
2928
2929	while (offset < end) {
2930		page_index = offset >> PAGE_SHIFT;
2931		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
2932		if (map_end > end)
2933			map_end = end;
2934
2935		/* from, to is the offset within the page. */
2936		from = offset & (PAGE_SIZE - 1);
2937		to = PAGE_SIZE;
2938		if (map_end & (PAGE_SIZE - 1))
2939			to = map_end & (PAGE_SIZE - 1);
2940
2941retry:
2942		page = find_or_create_page(mapping, page_index, GFP_NOFS);
2943		if (!page) {
2944			ret = -ENOMEM;
2945			mlog_errno(ret);
2946			break;
2947		}
2948
2949		/*
2950		 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
2951		 * page, so write it back.
2952		 */
2953		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
2954			if (PageDirty(page)) {
2955				/*
2956				 * write_one_page will unlock the page on return
2957				 */
2958				ret = write_one_page(page);
2959				goto retry;
2960			}
2961		}
2962
2963		if (!PageUptodate(page)) {
2964			ret = block_read_full_page(page, ocfs2_get_block);
2965			if (ret) {
2966				mlog_errno(ret);
2967				goto unlock;
2968			}
2969			lock_page(page);
2970		}
2971
2972		if (page_has_buffers(page)) {
2973			ret = walk_page_buffers(handle, page_buffers(page),
2974						from, to, &partial,
2975						ocfs2_clear_cow_buffer);
2976			if (ret) {
2977				mlog_errno(ret);
2978				goto unlock;
2979			}
2980		}
2981
2982		ocfs2_map_and_dirty_page(inode,
2983					 handle, from, to,
2984					 page, 0, &new_block);
2985		mark_page_accessed(page);
2986unlock:
2987		unlock_page(page);
2988		put_page(page);
2989		page = NULL;
2990		offset = map_end;
2991		if (ret)
2992			break;
2993	}
2994
2995	return ret;
2996}
2997
2998int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
2999				    struct inode *inode,
3000				    u32 cpos, u32 old_cluster,
3001				    u32 new_cluster, u32 new_len)
3002{
3003	int ret = 0;
3004	struct super_block *sb = inode->i_sb;
3005	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3006	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
3007	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
3008	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
3009	struct ocfs2_super *osb = OCFS2_SB(sb);
3010	struct buffer_head *old_bh = NULL;
3011	struct buffer_head *new_bh = NULL;
3012
3013	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3014					       new_cluster, new_len);
3015
3016	for (i = 0; i < blocks; i++, old_block++, new_block++) {
3017		new_bh = sb_getblk(osb->sb, new_block);
3018		if (new_bh == NULL) {
3019			ret = -ENOMEM;
3020			mlog_errno(ret);
3021			break;
3022		}
3023
3024		ocfs2_set_new_buffer_uptodate(ci, new_bh);
3025
3026		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
3027		if (ret) {
3028			mlog_errno(ret);
3029			break;
3030		}
3031
3032		ret = ocfs2_journal_access(handle, ci, new_bh,
3033					   OCFS2_JOURNAL_ACCESS_CREATE);
3034		if (ret) {
3035			mlog_errno(ret);
3036			break;
3037		}
3038
3039		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
3040		ocfs2_journal_dirty(handle, new_bh);
3041
3042		brelse(new_bh);
3043		brelse(old_bh);
3044		new_bh = NULL;
3045		old_bh = NULL;
3046	}
3047
3048	brelse(new_bh);
3049	brelse(old_bh);
3050	return ret;
3051}
3052
3053static int ocfs2_clear_ext_refcount(handle_t *handle,
3054				    struct ocfs2_extent_tree *et,
3055				    u32 cpos, u32 p_cluster, u32 len,
3056				    unsigned int ext_flags,
3057				    struct ocfs2_alloc_context *meta_ac,
3058				    struct ocfs2_cached_dealloc_ctxt *dealloc)
3059{
3060	int ret, index;
3061	struct ocfs2_extent_rec replace_rec;
3062	struct ocfs2_path *path = NULL;
3063	struct ocfs2_extent_list *el;
3064	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
3065	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
3066
3067	trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
3068				       cpos, len, p_cluster, ext_flags);
3069
3070	memset(&replace_rec, 0, sizeof(replace_rec));
3071	replace_rec.e_cpos = cpu_to_le32(cpos);
3072	replace_rec.e_leaf_clusters = cpu_to_le16(len);
3073	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
3074								   p_cluster));
3075	replace_rec.e_flags = ext_flags;
3076	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
3077
3078	path = ocfs2_new_path_from_et(et);
3079	if (!path) {
3080		ret = -ENOMEM;
3081		mlog_errno(ret);
3082		goto out;
3083	}
3084
3085	ret = ocfs2_find_path(et->et_ci, path, cpos);
3086	if (ret) {
3087		mlog_errno(ret);
3088		goto out;
3089	}
3090
3091	el = path_leaf_el(path);
3092
3093	index = ocfs2_search_extent_list(el, cpos);
3094	if (index == -1) {
3095		ret = ocfs2_error(sb,
3096				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
3097				  (unsigned long long)ino, cpos);
3098		goto out;
3099	}
3100
3101	ret = ocfs2_split_extent(handle, et, path, index,
3102				 &replace_rec, meta_ac, dealloc);
3103	if (ret)
3104		mlog_errno(ret);
3105
3106out:
3107	ocfs2_free_path(path);
3108	return ret;
3109}
3110
3111static int ocfs2_replace_clusters(handle_t *handle,
3112				  struct ocfs2_cow_context *context,
3113				  u32 cpos, u32 old,
3114				  u32 new, u32 len,
3115				  unsigned int ext_flags)
3116{
3117	int ret;
3118	struct ocfs2_caching_info *ci = context->data_et.et_ci;
3119	u64 ino = ocfs2_metadata_cache_owner(ci);
3120
3121	trace_ocfs2_replace_clusters((unsigned long long)ino,
3122				     cpos, old, new, len, ext_flags);
3123
3124	/* If the old clusters are unwritten, no need to duplicate. */
3125	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3126		ret = context->cow_duplicate_clusters(handle, context->inode,
3127						      cpos, old, new, len);
3128		if (ret) {
3129			mlog_errno(ret);
3130			goto out;
3131		}
3132	}
3133
3134	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
3135				       cpos, new, len, ext_flags,
3136				       context->meta_ac, &context->dealloc);
3137	if (ret)
3138		mlog_errno(ret);
3139out:
3140	return ret;
3141}
3142
3143int ocfs2_cow_sync_writeback(struct super_block *sb,
3144			     struct inode *inode,
3145			     u32 cpos, u32 num_clusters)
3146{
3147	int ret = 0;
3148	loff_t offset, end, map_end;
3149	pgoff_t page_index;
3150	struct page *page;
3151
3152	if (ocfs2_should_order_data(inode))
3153		return 0;
3154
3155	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3156	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
3157
3158	ret = filemap_fdatawrite_range(inode->i_mapping,
3159				       offset, end - 1);
3160	if (ret < 0) {
3161		mlog_errno(ret);
3162		return ret;
3163	}
3164
3165	while (offset < end) {
3166		page_index = offset >> PAGE_SHIFT;
3167		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
3168		if (map_end > end)
3169			map_end = end;
3170
3171		page = find_or_create_page(inode->i_mapping,
3172					   page_index, GFP_NOFS);
3173		BUG_ON(!page);
3174
3175		wait_on_page_writeback(page);
3176		if (PageError(page)) {
3177			ret = -EIO;
3178			mlog_errno(ret);
3179		} else
3180			mark_page_accessed(page);
3181
3182		unlock_page(page);
3183		put_page(page);
3184		page = NULL;
3185		offset = map_end;
3186		if (ret)
3187			break;
3188	}
3189
3190	return ret;
3191}
3192
3193static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
3194				 u32 v_cluster, u32 *p_cluster,
3195				 u32 *num_clusters,
3196				 unsigned int *extent_flags)
3197{
3198	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
3199				  num_clusters, extent_flags);
3200}
3201
3202static int ocfs2_make_clusters_writable(struct super_block *sb,
3203					struct ocfs2_cow_context *context,
3204					u32 cpos, u32 p_cluster,
3205					u32 num_clusters, unsigned int e_flags)
3206{
3207	int ret, delete, index, credits =  0;
3208	u32 new_bit, new_len, orig_num_clusters;
3209	unsigned int set_len;
3210	struct ocfs2_super *osb = OCFS2_SB(sb);
3211	handle_t *handle;
3212	struct buffer_head *ref_leaf_bh = NULL;
3213	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3214	struct ocfs2_refcount_rec rec;
3215
3216	trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3217					   num_clusters, e_flags);
3218
3219	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3220					     &context->data_et,
3221					     ref_ci,
3222					     context->ref_root_bh,
3223					     &context->meta_ac,
3224					     &context->data_ac, &credits);
3225	if (ret) {
3226		mlog_errno(ret);
3227		return ret;
3228	}
3229
3230	if (context->post_refcount)
3231		credits += context->post_refcount->credits;
3232
3233	credits += context->extra_credits;
3234	handle = ocfs2_start_trans(osb, credits);
3235	if (IS_ERR(handle)) {
3236		ret = PTR_ERR(handle);
3237		mlog_errno(ret);
3238		goto out;
3239	}
3240
3241	orig_num_clusters = num_clusters;
3242
3243	while (num_clusters) {
3244		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3245					     p_cluster, num_clusters,
3246					     &rec, &index, &ref_leaf_bh);
3247		if (ret) {
3248			mlog_errno(ret);
3249			goto out_commit;
3250		}
3251
3252		BUG_ON(!rec.r_refcount);
3253		set_len = min((u64)p_cluster + num_clusters,
3254			      le64_to_cpu(rec.r_cpos) +
3255			      le32_to_cpu(rec.r_clusters)) - p_cluster;
3256
3257		/*
3258		 * There are two different situations here.
3259		 * 1. If refcount == 1, remove the flag and don't COW.
3260		 * 2. If refcount > 1, allocate clusters.
3261		 *    Here we may not be able to allocate set_len clusters at
3262		 *    once, so continue until we reach num_clusters.
3263		 */
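		/*
		 * Sketch: a range at refcount 3 is copied into freshly
		 * claimed clusters (possibly in several
		 * __ocfs2_claim_clusters() chunks) and then dropped to
		 * refcount 2, while a range at refcount 1 only has its
		 * refcounted flag cleared in place.
		 */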
3264		if (le32_to_cpu(rec.r_refcount) == 1) {
3265			delete = 0;
3266			ret = ocfs2_clear_ext_refcount(handle,
3267						       &context->data_et,
3268						       cpos, p_cluster,
3269						       set_len, e_flags,
3270						       context->meta_ac,
3271						       &context->dealloc);
3272			if (ret) {
3273				mlog_errno(ret);
3274				goto out_commit;
3275			}
3276		} else {
3277			delete = 1;
3278
3279			ret = __ocfs2_claim_clusters(handle,
3280						     context->data_ac,
3281						     1, set_len,
3282						     &new_bit, &new_len);
3283			if (ret) {
3284				mlog_errno(ret);
3285				goto out_commit;
3286			}
3287
3288			ret = ocfs2_replace_clusters(handle, context,
3289						     cpos, p_cluster, new_bit,
3290						     new_len, e_flags);
3291			if (ret) {
3292				mlog_errno(ret);
3293				goto out_commit;
3294			}
3295			set_len = new_len;
3296		}
3297
3298		ret = __ocfs2_decrease_refcount(handle, ref_ci,
3299						context->ref_root_bh,
3300						p_cluster, set_len,
3301						context->meta_ac,
3302						&context->dealloc, delete);
3303		if (ret) {
3304			mlog_errno(ret);
3305			goto out_commit;
3306		}
3307
3308		cpos += set_len;
3309		p_cluster += set_len;
3310		num_clusters -= set_len;
3311		brelse(ref_leaf_bh);
3312		ref_leaf_bh = NULL;
3313	}
3314
3315	/* handle any post_cow action. */
3316	if (context->post_refcount && context->post_refcount->func) {
3317		ret = context->post_refcount->func(context->inode, handle,
3318						context->post_refcount->para);
3319		if (ret) {
3320			mlog_errno(ret);
3321			goto out_commit;
3322		}
3323	}
3324
3325	/*
3326	 * Here we should write the new page out first if we are
3327	 * in write-back mode.
3328	 */
3329	if (context->get_clusters == ocfs2_di_get_clusters) {
3330		ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3331					       orig_num_clusters);
3332		if (ret)
3333			mlog_errno(ret);
3334	}
3335
3336out_commit:
3337	ocfs2_commit_trans(osb, handle);
3338
3339out:
3340	if (context->data_ac) {
3341		ocfs2_free_alloc_context(context->data_ac);
3342		context->data_ac = NULL;
3343	}
3344	if (context->meta_ac) {
3345		ocfs2_free_alloc_context(context->meta_ac);
3346		context->meta_ac = NULL;
3347	}
3348	brelse(ref_leaf_bh);
3349
3350	return ret;
3351}
3352
3353static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3354{
3355	int ret = 0;
3356	struct inode *inode = context->inode;
3357	u32 cow_start = context->cow_start, cow_len = context->cow_len;
3358	u32 p_cluster, num_clusters;
3359	unsigned int ext_flags;
3360	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3361
3362	if (!ocfs2_refcount_tree(osb)) {
3363		return ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount tree, but the feature bit is not set in the super block\n",
3364				   inode->i_ino);
3365	}
3366
3367	ocfs2_init_dealloc_ctxt(&context->dealloc);
3368
3369	while (cow_len) {
3370		ret = context->get_clusters(context, cow_start, &p_cluster,
3371					    &num_clusters, &ext_flags);
3372		if (ret) {
3373			mlog_errno(ret);
3374			break;
3375		}
3376
3377		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3378
3379		if (cow_len < num_clusters)
3380			num_clusters = cow_len;
3381
3382		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3383						   cow_start, p_cluster,
3384						   num_clusters, ext_flags);
3385		if (ret) {
3386			mlog_errno(ret);
3387			break;
3388		}
3389
3390		cow_len -= num_clusters;
3391		cow_start += num_clusters;
3392	}
3393
3394	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3395		ocfs2_schedule_truncate_log_flush(osb, 1);
3396		ocfs2_run_deallocs(osb, &context->dealloc);
3397	}
3398
3399	return ret;
3400}
3401
3402/*
3403 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
3404 * past max_cpos.  This will stop when it runs into a hole or an
3405 * unrefcounted extent.
3406 */
3407static int ocfs2_refcount_cow_hunk(struct inode *inode,
3408				   struct buffer_head *di_bh,
3409				   u32 cpos, u32 write_len, u32 max_cpos)
3410{
3411	int ret;
3412	u32 cow_start = 0, cow_len = 0;
3413	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3414	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3415	struct buffer_head *ref_root_bh = NULL;
3416	struct ocfs2_refcount_tree *ref_tree;
3417	struct ocfs2_cow_context *context = NULL;
3418
3419	BUG_ON(!ocfs2_is_refcount_inode(inode));
3420
3421	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
3422					      cpos, write_len, max_cpos,
3423					      &cow_start, &cow_len);
3424	if (ret) {
3425		mlog_errno(ret);
3426		goto out;
3427	}
3428
3429	trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
3430				      cpos, write_len, max_cpos,
3431				      cow_start, cow_len);
3432
3433	BUG_ON(cow_len == 0);
3434
3435	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3436	if (!context) {
3437		ret = -ENOMEM;
3438		mlog_errno(ret);
3439		goto out;
3440	}
3441
3442	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3443				       1, &ref_tree, &ref_root_bh);
3444	if (ret) {
3445		mlog_errno(ret);
3446		goto out;
3447	}
3448
3449	context->inode = inode;
3450	context->cow_start = cow_start;
3451	context->cow_len = cow_len;
3452	context->ref_tree = ref_tree;
3453	context->ref_root_bh = ref_root_bh;
3454	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3455	context->get_clusters = ocfs2_di_get_clusters;
3456
3457	ocfs2_init_dinode_extent_tree(&context->data_et,
3458				      INODE_CACHE(inode), di_bh);
3459
3460	ret = ocfs2_replace_cow(context);
3461	if (ret)
3462		mlog_errno(ret);
3463
3464	/*
3465	 * Truncate the extent map here: whether or not we hit an error
3466	 * during the operation, we should no longer trust the cached
3467	 * extent map.
3468	 */
3469	ocfs2_extent_map_trunc(inode, cow_start);
3470
3471	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3472	brelse(ref_root_bh);
3473out:
3474	kfree(context);
3475	return ret;
3476}
3477
3478/*
3479 * CoW any and all clusters between cpos and cpos+write_len.
3480 * Don't CoW past max_cpos.  If this returns successfully, all
3481 * clusters between cpos and cpos+write_len are safe to modify.
3482 */
3483int ocfs2_refcount_cow(struct inode *inode,
3484		       struct buffer_head *di_bh,
3485		       u32 cpos, u32 write_len, u32 max_cpos)
3486{
3487	int ret = 0;
3488	u32 p_cluster, num_clusters;
3489	unsigned int ext_flags;
3490
3491	while (write_len) {
3492		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3493					 &num_clusters, &ext_flags);
3494		if (ret) {
3495			mlog_errno(ret);
3496			break;
3497		}
3498
3499		if (write_len < num_clusters)
3500			num_clusters = write_len;
3501
3502		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3503			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3504						      num_clusters, max_cpos);
3505			if (ret) {
3506				mlog_errno(ret);
3507				break;
3508			}
3509		}
3510
3511		write_len -= num_clusters;
3512		cpos += num_clusters;
3513	}
3514
3515	return ret;
3516}
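/*
 * A minimal caller sketch (assumed, not taken from this file): a write
 * path holding di_bh could do
 *
 *	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 *
 * after which clusters [cpos, cpos + clusters) are safe to modify.
 */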
3517
3518static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
3519					  u32 v_cluster, u32 *p_cluster,
3520					  u32 *num_clusters,
3521					  unsigned int *extent_flags)
3522{
3523	struct inode *inode = context->inode;
3524	struct ocfs2_xattr_value_root *xv = context->cow_object;
3525
3526	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
3527					num_clusters, &xv->xr_list,
3528					extent_flags);
3529}
3530
3531/*
3532 * Given an xattr value root, calculate the most metadata blocks and credits
3533 * we will need for a refcount tree change if we truncate it to 0.
3534 */
3535int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
3536				       struct ocfs2_caching_info *ref_ci,
3537				       struct buffer_head *ref_root_bh,
3538				       struct ocfs2_xattr_value_root *xv,
3539				       int *meta_add, int *credits)
3540{
3541	int ret = 0, index, ref_blocks = 0;
3542	u32 p_cluster, num_clusters;
3543	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
3544	struct ocfs2_refcount_block *rb;
3545	struct ocfs2_refcount_rec rec;
3546	struct buffer_head *ref_leaf_bh = NULL;
3547
3548	while (cpos < clusters) {
3549		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
3550					       &num_clusters, &xv->xr_list,
3551					       NULL);
3552		if (ret) {
3553			mlog_errno(ret);
3554			goto out;
3555		}
3556
3557		cpos += num_clusters;
3558
3559		while (num_clusters) {
3560			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
3561						     p_cluster, num_clusters,
3562						     &rec, &index,
3563						     &ref_leaf_bh);
3564			if (ret) {
3565				mlog_errno(ret);
3566				goto out;
3567			}
3568
3569			BUG_ON(!rec.r_refcount);
3570
3571			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
3572
3573			/*
3574			 * We really don't know whether the other clusters are
3575			 * in this refcount block or not, so just take the
3576			 * worst case: all the clusters are in this block and
3577			 * each one will split a refcount rec, so in total we
3578			 * need clusters * 2 new refcount recs.
3579			 */
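			/*
			 * E.g. (sketch): truncating an 8-cluster value
			 * budgets 16 extra records here; only if
			 * rl_used + 16 > rl_count do we count another
			 * refcount block.
			 */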
3580			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
3581			    le16_to_cpu(rb->rf_records.rl_count))
3582				ref_blocks++;
3583
3584			*credits += 1;
3585			brelse(ref_leaf_bh);
3586			ref_leaf_bh = NULL;
3587
3588			if (num_clusters <= le32_to_cpu(rec.r_clusters))
3589				break;
3590			else
3591				num_clusters -= le32_to_cpu(rec.r_clusters);
3592			p_cluster += num_clusters;
3593		}
3594	}
3595
3596	*meta_add += ref_blocks;
3597	if (!ref_blocks)
3598		goto out;
3599
3600	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
3601	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
3602		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
3603	else {
3604		struct ocfs2_extent_tree et;
3605
3606		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
3607		*credits += ocfs2_calc_extend_credits(inode->i_sb,
3608						      et.et_root_el);
3609	}
3610
3611out:
3612	brelse(ref_leaf_bh);
3613	return ret;
3614}
3615
3616/*
3617 * Do CoW for xattr.
3618 */
3619int ocfs2_refcount_cow_xattr(struct inode *inode,
3620			     struct ocfs2_dinode *di,
3621			     struct ocfs2_xattr_value_buf *vb,
3622			     struct ocfs2_refcount_tree *ref_tree,
3623			     struct buffer_head *ref_root_bh,
3624			     u32 cpos, u32 write_len,
3625			     struct ocfs2_post_refcount *post)
3626{
3627	int ret;
3628	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
3629	struct ocfs2_cow_context *context = NULL;
3630	u32 cow_start, cow_len;
3631
3632	BUG_ON(!ocfs2_is_refcount_inode(inode));
3633
3634	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
3635					      cpos, write_len, UINT_MAX,
3636					      &cow_start, &cow_len);
3637	if (ret) {
3638		mlog_errno(ret);
3639		goto out;
3640	}
3641
3642	BUG_ON(cow_len == 0);
3643
3644	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3645	if (!context) {
3646		ret = -ENOMEM;
3647		mlog_errno(ret);
3648		goto out;
3649	}
3650
3651	context->inode = inode;
3652	context->cow_start = cow_start;
3653	context->cow_len = cow_len;
3654	context->ref_tree = ref_tree;
3655	context->ref_root_bh = ref_root_bh;
3656	context->cow_object = xv;
3657
3658	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
3659	/* We need the extra credits for duplicate_clusters by jbd. */
3660	context->extra_credits =
3661		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
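	/*
	 * E.g. (sketch): with 4KB blocks and 32KB clusters this is
	 * 8 extra credits per CoWed cluster, one per copied block.
	 */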
3662	context->get_clusters = ocfs2_xattr_value_get_clusters;
3663	context->post_refcount = post;
3664
3665	ocfs2_init_xattr_value_extent_tree(&context->data_et,
3666					   INODE_CACHE(inode), vb);
3667
3668	ret = ocfs2_replace_cow(context);
3669	if (ret)
3670		mlog_errno(ret);
3671
3672out:
3673	kfree(context);
3674	return ret;
3675}
3676
3677/*
3678 * Insert a new extent into the refcount tree and mark an extent rec
3679 * as refcounted in the dinode tree.
3680 */
3681int ocfs2_add_refcount_flag(struct inode *inode,
3682			    struct ocfs2_extent_tree *data_et,
3683			    struct ocfs2_caching_info *ref_ci,
3684			    struct buffer_head *ref_root_bh,
3685			    u32 cpos, u32 p_cluster, u32 num_clusters,
3686			    struct ocfs2_cached_dealloc_ctxt *dealloc,
3687			    struct ocfs2_post_refcount *post)
3688{
3689	int ret;
3690	handle_t *handle;
3691	int credits = 1, ref_blocks = 0;
3692	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3693	struct ocfs2_alloc_context *meta_ac = NULL;
3694
3695	/* We need to be able to handle at least an extent tree split. */
3696	ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el);
3697
3698	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
3699					       ref_ci, ref_root_bh,
3700					       p_cluster, num_clusters,
3701					       &ref_blocks, &credits);
3702	if (ret) {
3703		mlog_errno(ret);
3704		goto out;
3705	}
3706
3707	trace_ocfs2_add_refcount_flag(ref_blocks, credits);
3708
3709	if (ref_blocks) {
3710		ret = ocfs2_reserve_new_metadata_blocks(osb,
3711							ref_blocks, &meta_ac);
3712		if (ret) {
3713			mlog_errno(ret);
3714			goto out;
3715		}
3716	}
3717
3718	if (post)
3719		credits += post->credits;
3720
3721	handle = ocfs2_start_trans(osb, credits);
3722	if (IS_ERR(handle)) {
3723		ret = PTR_ERR(handle);
3724		mlog_errno(ret);
3725		goto out;
3726	}
3727
3728	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
3729					   cpos, num_clusters, p_cluster,
3730					   meta_ac, dealloc);
3731	if (ret) {
3732		mlog_errno(ret);
3733		goto out_commit;
3734	}
3735
3736	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3737					p_cluster, num_clusters, 0,
3738					meta_ac, dealloc);
3739	if (ret) {
3740		mlog_errno(ret);
3741		goto out_commit;
3742	}
3743
3744	if (post && post->func) {
3745		ret = post->func(inode, handle, post->para);
3746		if (ret)
3747			mlog_errno(ret);
3748	}
3749
3750out_commit:
3751	ocfs2_commit_trans(osb, handle);
3752out:
3753	if (meta_ac)
3754		ocfs2_free_alloc_context(meta_ac);
3755	return ret;
3756}
3757
3758static int ocfs2_change_ctime(struct inode *inode,
3759			      struct buffer_head *di_bh)
3760{
3761	int ret;
3762	handle_t *handle;
3763	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3764
3765	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
3766				   OCFS2_INODE_UPDATE_CREDITS);
3767	if (IS_ERR(handle)) {
3768		ret = PTR_ERR(handle);
3769		mlog_errno(ret);
3770		goto out;
3771	}
3772
3773	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
3774				      OCFS2_JOURNAL_ACCESS_WRITE);
3775	if (ret) {
3776		mlog_errno(ret);
3777		goto out_commit;
3778	}
3779
3780	inode->i_ctime = current_time(inode);
3781	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3782	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3783
3784	ocfs2_journal_dirty(handle, di_bh);
3785
3786out_commit:
3787	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
3788out:
3789	return ret;
3790}
3791
3792static int ocfs2_attach_refcount_tree(struct inode *inode,
3793				      struct buffer_head *di_bh)
3794{
3795	int ret, data_changed = 0;
3796	struct buffer_head *ref_root_bh = NULL;
3797	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3798	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3799	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3800	struct ocfs2_refcount_tree *ref_tree;
3801	unsigned int ext_flags;
3802	loff_t size;
3803	u32 cpos, num_clusters, clusters, p_cluster;
3804	struct ocfs2_cached_dealloc_ctxt dealloc;
3805	struct ocfs2_extent_tree di_et;
3806
3807	ocfs2_init_dealloc_ctxt(&dealloc);
3808
3809	if (!ocfs2_is_refcount_inode(inode)) {
3810		ret = ocfs2_create_refcount_tree(inode, di_bh);
3811		if (ret) {
3812			mlog_errno(ret);
3813			goto out;
3814		}
3815	}
3816
3817	BUG_ON(!di->i_refcount_loc);
3818	ret = ocfs2_lock_refcount_tree(osb,
3819				       le64_to_cpu(di->i_refcount_loc), 1,
3820				       &ref_tree, &ref_root_bh);
3821	if (ret) {
3822		mlog_errno(ret);
3823		goto out;
3824	}
3825
3826	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3827		goto attach_xattr;
3828
3829	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3830
3831	size = i_size_read(inode);
3832	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
3833
3834	cpos = 0;
3835	while (cpos < clusters) {
3836		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3837					 &num_clusters, &ext_flags);
3838		if (ret) {
3839			mlog_errno(ret);
3840			goto unlock;
3841		}
3842		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
3843			ret = ocfs2_add_refcount_flag(inode, &di_et,
3844						      &ref_tree->rf_ci,
3845						      ref_root_bh, cpos,
3846						      p_cluster, num_clusters,
3847						      &dealloc, NULL);
3848			if (ret) {
3849				mlog_errno(ret);
3850				goto unlock;
3851			}
3852
3853			data_changed = 1;
3854		}
3855		cpos += num_clusters;
3856	}
3857
3858attach_xattr:
3859	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3860		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3861						       &ref_tree->rf_ci,
3862						       ref_root_bh,
3863						       &dealloc);
3864		if (ret) {
3865			mlog_errno(ret);
3866			goto unlock;
3867		}
3868	}
3869
3870	if (data_changed) {
3871		ret = ocfs2_change_ctime(inode, di_bh);
3872		if (ret)
3873			mlog_errno(ret);
3874	}
3875
3876unlock:
3877	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3878	brelse(ref_root_bh);
3879
3880	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
3881		ocfs2_schedule_truncate_log_flush(osb, 1);
3882		ocfs2_run_deallocs(osb, &dealloc);
3883	}
3884out:
3885	/*
3886	 * Empty the extent map so that we read the correct extent
3887	 * records from disk next time.
3888	 */
3889	ocfs2_extent_map_trunc(inode, 0);
3890
3891	return ret;
3892}
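
/*
 * Editor's note: the cluster walk in ocfs2_attach_refcount_tree() above is
 * the standard ocfs2 extent-map iteration idiom.  In isolation (a sketch,
 * with error handling elided) it looks like:
 *
 *	u32 cpos = 0, p_cluster, num_clusters;
 *	unsigned int ext_flags;
 *
 *	while (cpos < clusters) {
 *		ocfs2_get_clusters(inode, cpos, &p_cluster,
 *				   &num_clusters, &ext_flags);
 *		if (p_cluster)		(p_cluster == 0 means a hole)
 *			... operate on the physical extent ...
 *		cpos += num_clusters;
 *	}
 */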
3893
3894static int ocfs2_add_refcounted_extent(struct inode *inode,
3895				   struct ocfs2_extent_tree *et,
3896				   struct ocfs2_caching_info *ref_ci,
3897				   struct buffer_head *ref_root_bh,
3898				   u32 cpos, u32 p_cluster, u32 num_clusters,
3899				   unsigned int ext_flags,
3900				   struct ocfs2_cached_dealloc_ctxt *dealloc)
3901{
3902	int ret;
3903	handle_t *handle;
3904	int credits = 0;
3905	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3906	struct ocfs2_alloc_context *meta_ac = NULL;
3907
3908	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
3909					     p_cluster, num_clusters,
3910					     et, ref_ci,
3911					     ref_root_bh, &meta_ac,
3912					     NULL, &credits);
3913	if (ret) {
3914		mlog_errno(ret);
3915		goto out;
3916	}
3917
3918	handle = ocfs2_start_trans(osb, credits);
3919	if (IS_ERR(handle)) {
3920		ret = PTR_ERR(handle);
3921		mlog_errno(ret);
3922		goto out;
3923	}
3924
3925	ret = ocfs2_insert_extent(handle, et, cpos,
3926			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
3927			num_clusters, ext_flags, meta_ac);
3928	if (ret) {
3929		mlog_errno(ret);
3930		goto out_commit;
3931	}
3932
3933	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3934				      p_cluster, num_clusters,
3935				      meta_ac, dealloc);
3936	if (ret) {
3937		mlog_errno(ret);
3938		goto out_commit;
3939	}
3940
3941	ret = dquot_alloc_space_nodirty(inode,
3942		ocfs2_clusters_to_bytes(osb->sb, num_clusters));
3943	if (ret)
3944		mlog_errno(ret);
3945
3946out_commit:
3947	ocfs2_commit_trans(osb, handle);
3948out:
3949	if (meta_ac)
3950		ocfs2_free_alloc_context(meta_ac);
3951	return ret;
3952}
3953
3954static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3955				       struct buffer_head *s_bh,
3956				       struct inode *t_inode,
3957				       struct buffer_head *t_bh)
3958{
3959	int ret;
3960	handle_t *handle;
3961	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3962	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3963	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3964
3965	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3966
3967	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3968	if (IS_ERR(handle)) {
3969		ret = PTR_ERR(handle);
3970		mlog_errno(ret);
3971		goto out;
3972	}
3973
3974	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3975				      OCFS2_JOURNAL_ACCESS_WRITE);
3976	if (ret) {
3977		mlog_errno(ret);
3978		goto out_commit;
3979	}
3980
3981	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3982	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3983	       le16_to_cpu(s_di->id2.i_data.id_count));
3984	spin_lock(&OCFS2_I(t_inode)->ip_lock);
3985	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3986	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3987	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3988
3989	ocfs2_journal_dirty(handle, t_bh);
3990
3991out_commit:
3992	ocfs2_commit_trans(osb, handle);
3993out:
3994	return ret;
3995}
3996
3997static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3998				struct inode *t_inode,
3999				struct buffer_head *t_bh,
4000				struct ocfs2_caching_info *ref_ci,
4001				struct buffer_head *ref_root_bh,
4002				struct ocfs2_cached_dealloc_ctxt *dealloc)
4003{
4004	int ret = 0;
4005	u32 p_cluster, num_clusters, clusters, cpos;
4006	loff_t size;
4007	unsigned int ext_flags;
4008	struct ocfs2_extent_tree et;
4009
4010	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
4011
4012	size = i_size_read(s_inode);
4013	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
4014
4015	cpos = 0;
4016	while (cpos < clusters) {
4017		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
4018					 &num_clusters, &ext_flags);
4019		if (ret) {
4020			mlog_errno(ret);
4021			goto out;
4022		}
4023		if (p_cluster) {
4024			ret = ocfs2_add_refcounted_extent(t_inode, &et,
4025							  ref_ci, ref_root_bh,
4026							  cpos, p_cluster,
4027							  num_clusters,
4028							  ext_flags,
4029							  dealloc);
4030			if (ret) {
4031				mlog_errno(ret);
4032				goto out;
4033			}
4034		}
4035
4036		cpos += num_clusters;
4037	}
4038
4039out:
4040	return ret;
4041}
4042
4043/*
4044 * Change the new file's attributes to match the source.
4045 *
4046 * Reflink creates a snapshot of a file; that means the attributes
4047 * must be identical except for three exceptions: nlink, ino, and ctime.
4048 */
4049static int ocfs2_complete_reflink(struct inode *s_inode,
4050				  struct buffer_head *s_bh,
4051				  struct inode *t_inode,
4052				  struct buffer_head *t_bh,
4053				  bool preserve)
4054{
4055	int ret;
4056	handle_t *handle;
4057	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
4058	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
4059	loff_t size = i_size_read(s_inode);
4060
4061	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
4062				   OCFS2_INODE_UPDATE_CREDITS);
4063	if (IS_ERR(handle)) {
4064		ret = PTR_ERR(handle);
4065		mlog_errno(ret);
4066		return ret;
4067	}
4068
4069	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
4070				      OCFS2_JOURNAL_ACCESS_WRITE);
4071	if (ret) {
4072		mlog_errno(ret);
4073		goto out_commit;
4074	}
4075
4076	spin_lock(&OCFS2_I(t_inode)->ip_lock);
4077	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
4078	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
4079	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
4080	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
4081	i_size_write(t_inode, size);
4082	t_inode->i_blocks = s_inode->i_blocks;
4083
4084	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
4085	di->i_clusters = s_di->i_clusters;
4086	di->i_size = s_di->i_size;
4087	di->i_dyn_features = s_di->i_dyn_features;
4088	di->i_attr = s_di->i_attr;
4089
4090	if (preserve) {
4091		t_inode->i_uid = s_inode->i_uid;
4092		t_inode->i_gid = s_inode->i_gid;
4093		t_inode->i_mode = s_inode->i_mode;
4094		di->i_uid = s_di->i_uid;
4095		di->i_gid = s_di->i_gid;
4096		di->i_mode = s_di->i_mode;
4097
4098		/*
4099		 * Update the timestamps.
4100		 * We want mtime to appear identical to the source and
4101		 * ctime to be set to the current time.
4102		 */
4103		t_inode->i_ctime = current_time(t_inode);
4104
4105		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
4106		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
4107
4108		t_inode->i_mtime = s_inode->i_mtime;
4109		di->i_mtime = s_di->i_mtime;
4110		di->i_mtime_nsec = s_di->i_mtime_nsec;
4111	}
4112
4113	ocfs2_journal_dirty(handle, t_bh);
4114
4115out_commit:
4116	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
4117	return ret;
4118}
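
/*
 * Editor's note on the preserve flag handled above: with preserve == true
 * the target also inherits uid/gid/mode and mtime from the source, while
 * ctime is always set to the current time; with preserve == false those
 * fields keep the values given to the new orphan inode, and the security
 * attributes are re-initialized later in ocfs2_reflink().
 */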
4119
4120static int ocfs2_create_reflink_node(struct inode *s_inode,
4121				     struct buffer_head *s_bh,
4122				     struct inode *t_inode,
4123				     struct buffer_head *t_bh,
4124				     bool preserve)
4125{
4126	int ret;
4127	struct buffer_head *ref_root_bh = NULL;
4128	struct ocfs2_cached_dealloc_ctxt dealloc;
4129	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
4130	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
4131	struct ocfs2_refcount_tree *ref_tree;
4132
4133	ocfs2_init_dealloc_ctxt(&dealloc);
4134
4135	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4136				      le64_to_cpu(di->i_refcount_loc));
4137	if (ret) {
4138		mlog_errno(ret);
4139		goto out;
4140	}
4141
4142	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4143		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4144						  t_inode, t_bh);
4145		if (ret)
4146			mlog_errno(ret);
4147		goto out;
4148	}
4149
4150	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4151				       1, &ref_tree, &ref_root_bh);
4152	if (ret) {
4153		mlog_errno(ret);
4154		goto out;
4155	}
4156
4157	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
4158					  &ref_tree->rf_ci, ref_root_bh,
4159					  &dealloc);
4160	if (ret) {
4161		mlog_errno(ret);
4162		goto out_unlock_refcount;
4163	}
4164
4165out_unlock_refcount:
4166	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4167	brelse(ref_root_bh);
4168out:
4169	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4170		ocfs2_schedule_truncate_log_flush(osb, 1);
4171		ocfs2_run_deallocs(osb, &dealloc);
4172	}
4173
4174	return ret;
4175}
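
/*
 * Editor's note: the dealloc context used above (and again in
 * ocfs2_attach_refcount_tree() and ocfs2_reflink_remap_blocks()) follows
 * a single pattern; as a minimal sketch:
 *
 *	struct ocfs2_cached_dealloc_ctxt dealloc;
 *
 *	ocfs2_init_dealloc_ctxt(&dealloc);
 *	... tree operations queue unused clusters onto &dealloc ...
 *	if (ocfs2_dealloc_has_cluster(&dealloc)) {
 *		ocfs2_schedule_truncate_log_flush(osb, 1);
 *		ocfs2_run_deallocs(osb, &dealloc);
 *	}
 */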
4176
4177static int __ocfs2_reflink(struct dentry *old_dentry,
4178			   struct buffer_head *old_bh,
4179			   struct inode *new_inode,
4180			   bool preserve)
4181{
4182	int ret;
4183	struct inode *inode = d_inode(old_dentry);
4184	struct buffer_head *new_bh = NULL;
4185
4186	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
4187		ret = -EINVAL;
4188		mlog_errno(ret);
4189		goto out;
4190	}
4191
4192	ret = filemap_fdatawrite(inode->i_mapping);
4193	if (ret) {
4194		mlog_errno(ret);
4195		goto out;
4196	}
4197
4198	ret = ocfs2_attach_refcount_tree(inode, old_bh);
4199	if (ret) {
4200		mlog_errno(ret);
4201		goto out;
4202	}
4203
4204	inode_lock_nested(new_inode, I_MUTEX_CHILD);
4205	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4206				      OI_LS_REFLINK_TARGET);
4207	if (ret) {
4208		mlog_errno(ret);
4209		goto out_unlock;
4210	}
4211
4212	ret = ocfs2_create_reflink_node(inode, old_bh,
4213					new_inode, new_bh, preserve);
4214	if (ret) {
4215		mlog_errno(ret);
4216		goto inode_unlock;
4217	}
4218
4219	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
4220		ret = ocfs2_reflink_xattrs(inode, old_bh,
4221					   new_inode, new_bh,
4222					   preserve);
4223		if (ret) {
4224			mlog_errno(ret);
4225			goto inode_unlock;
4226		}
4227	}
4228
4229	ret = ocfs2_complete_reflink(inode, old_bh,
4230				     new_inode, new_bh, preserve);
4231	if (ret)
4232		mlog_errno(ret);
4233
4234inode_unlock:
4235	ocfs2_inode_unlock(new_inode, 1);
4236	brelse(new_bh);
4237out_unlock:
4238	inode_unlock(new_inode);
4239out:
4240	if (!ret) {
4241		ret = filemap_fdatawait(inode->i_mapping);
4242		if (ret)
4243			mlog_errno(ret);
4244	}
4245	return ret;
4246}
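
/*
 * Editor's note: the lock ordering across ocfs2_reflink() below and
 * __ocfs2_reflink() above is:
 *
 *	1. source rw lock		(ocfs2_rw_lock)
 *	2. source cluster lock		(ocfs2_inode_lock)
 *	3. source ip_xattr_sem, then ip_alloc_sem
 *	4. target i_mutex		(inode_lock_nested, I_MUTEX_CHILD)
 *	5. target cluster lock		(ocfs2_inode_lock_nested,
 *					 OI_LS_REFLINK_TARGET)
 */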
4247
4248static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4249			 struct dentry *new_dentry, bool preserve)
4250{
4251	int error, had_lock;
4252	struct inode *inode = d_inode(old_dentry);
4253	struct buffer_head *old_bh = NULL;
4254	struct inode *new_orphan_inode = NULL;
4255	struct ocfs2_lock_holder oh;
4256
4257	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4258		return -EOPNOTSUPP;
4259
4260
4261	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
4262					     &new_orphan_inode);
4263	if (error) {
4264		mlog_errno(error);
4265		goto out;
4266	}
4267
4268	error = ocfs2_rw_lock(inode, 1);
4269	if (error) {
4270		mlog_errno(error);
4271		goto out;
4272	}
4273
4274	error = ocfs2_inode_lock(inode, &old_bh, 1);
4275	if (error) {
4276		mlog_errno(error);
4277		ocfs2_rw_unlock(inode, 1);
4278		goto out;
4279	}
4280
4281	down_write(&OCFS2_I(inode)->ip_xattr_sem);
4282	down_write(&OCFS2_I(inode)->ip_alloc_sem);
4283	error = __ocfs2_reflink(old_dentry, old_bh,
4284				new_orphan_inode, preserve);
4285	up_write(&OCFS2_I(inode)->ip_alloc_sem);
4286	up_write(&OCFS2_I(inode)->ip_xattr_sem);
4287
4288	ocfs2_inode_unlock(inode, 1);
4289	ocfs2_rw_unlock(inode, 1);
4290	brelse(old_bh);
4291
4292	if (error) {
4293		mlog_errno(error);
4294		goto out;
4295	}
4296
4297	had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
4298					    &oh);
4299	if (had_lock < 0) {
4300		error = had_lock;
4301		mlog_errno(error);
4302		goto out;
4303	}
4304
4305	/* If the security attributes aren't preserved, re-initialize them. */
4306	if (!preserve) {
4307		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
4308						    &new_dentry->d_name);
4309		if (error)
4310			mlog_errno(error);
4311	}
4312	if (!error) {
4313		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
4314						       new_dentry);
4315		if (error)
4316			mlog_errno(error);
4317	}
4318	ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
4319
4320out:
4321	if (new_orphan_inode) {
4322		/*
4323		 * We need to open_unlock the inode whether we succeeded
4324		 * or not, so that other nodes can delete it later.
4325		 */
4326		ocfs2_open_unlock(new_orphan_inode);
4327		if (error)
4328			iput(new_orphan_inode);
4329	}
4330
4331	return error;
4332}
4333
4334/*
4335 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
4336 * sys_reflink().  This will go away when vfs_reflink() exists in
4337 * fs/namei.c.
4338 */
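
/*
 * Editor's sketch of the userspace side (hypothetical paths; the
 * reflink_arguments layout and OCFS2_IOC_REFLINK come from
 * ocfs2_ioctl.h):
 *
 *	struct reflink_arguments args = {
 *		.old_path = (unsigned long)"/mnt/ocfs2/file",
 *		.new_path = (unsigned long)"/mnt/ocfs2/snapshot",
 *		.preserve = 1,	(keep uid/gid/mode and mtime)
 *	};
 *	int fd = open("/mnt/ocfs2/file", O_RDONLY);
 *
 *	ioctl(fd, OCFS2_IOC_REFLINK, &args);
 */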
4339
4340/* copied from may_create in VFS. */
4341static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
4342{
4343	if (d_really_is_positive(child))
4344		return -EEXIST;
4345	if (IS_DEADDIR(dir))
4346		return -ENOENT;
4347	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
4348}
4349
4350/**
4351 * ocfs2_vfs_reflink - Create a reference-counted link
4352 *
4353 * @old_dentry:        source dentry + inode
4354 * @dir:       directory in which to create the target
4355 * @new_dentry:        target dentry
4356 * @preserve:  if true, preserve all file attributes
4357 */
4358static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
4359			     struct dentry *new_dentry, bool preserve)
4360{
4361	struct inode *inode = d_inode(old_dentry);
4362	int error;
4363
4364	if (!inode)
4365		return -ENOENT;
4366
4367	error = ocfs2_may_create(dir, new_dentry);
4368	if (error)
4369		return error;
4370
4371	if (dir->i_sb != inode->i_sb)
4372		return -EXDEV;
4373
4374	/*
4375	 * A reflink to an append-only or immutable file cannot be created.
4376	 */
4377	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4378		return -EPERM;
4379
4380	/* Only regular files can be reflinked. */
4381	if (!S_ISREG(inode->i_mode))
4382		return -EPERM;
4383
4384	/*
4385	 * If the caller wants to preserve ownership, they must have the
4386	 * rights to do so.
4387	 */
4388	if (preserve) {
4389		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
4390			return -EPERM;
4391		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
4392			return -EPERM;
4393	}
4394
4395	/*
4396	 * If the caller is modifying any aspect of the attributes, they
4397	 * are not creating a snapshot.  They need read permission on the
4398	 * file.
4399	 */
4400	if (!preserve) {
4401		error = inode_permission(inode, MAY_READ);
4402		if (error)
4403			return error;
4404	}
4405
4406	inode_lock(inode);
4407	error = dquot_initialize(dir);
4408	if (!error)
4409		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
4410	inode_unlock(inode);
4411	if (!error)
4412		fsnotify_create(dir, new_dentry);
4413	return error;
4414}
4415/*
4416 * Most of this code is copied from sys_linkat.
4417 */
4418int ocfs2_reflink_ioctl(struct inode *inode,
4419			const char __user *oldname,
4420			const char __user *newname,
4421			bool preserve)
4422{
4423	struct dentry *new_dentry;
4424	struct path old_path, new_path;
4425	int error;
4426
4427	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4428		return -EOPNOTSUPP;
4429
4430	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
4431	if (error) {
4432		mlog_errno(error);
4433		return error;
4434	}
4435
4436	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
4437	error = PTR_ERR(new_dentry);
4438	if (IS_ERR(new_dentry)) {
4439		mlog_errno(error);
4440		goto out;
4441	}
4442
4443	error = -EXDEV;
4444	if (old_path.mnt != new_path.mnt) {
4445		mlog_errno(error);
4446		goto out_dput;
4447	}
4448
4449	error = ocfs2_vfs_reflink(old_path.dentry,
4450				  d_inode(new_path.dentry),
4451				  new_dentry, preserve);
4452out_dput:
4453	done_path_create(&new_path, new_dentry);
4454out:
4455	path_put(&old_path);
4456
4457	return error;
4458}
4459
4460/* Update destination inode size, if necessary. */
4461int ocfs2_reflink_update_dest(struct inode *dest,
4462			      struct buffer_head *d_bh,
4463			      loff_t newlen)
4464{
4465	handle_t *handle;
4466	int ret;
4467
4468	dest->i_blocks = ocfs2_inode_sector_count(dest);
4469
4470	if (newlen <= i_size_read(dest))
4471		return 0;
4472
4473	handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
4474				   OCFS2_INODE_UPDATE_CREDITS);
4475	if (IS_ERR(handle)) {
4476		ret = PTR_ERR(handle);
4477		mlog_errno(ret);
4478		return ret;
4479	}
4480
4481	/* Extend i_size if needed. */
4482	spin_lock(&OCFS2_I(dest)->ip_lock);
4483	if (newlen > i_size_read(dest))
4484		i_size_write(dest, newlen);
4485	spin_unlock(&OCFS2_I(dest)->ip_lock);
4486	dest->i_ctime = dest->i_mtime = current_time(dest);
4487
4488	ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
4489	if (ret) {
4490		mlog_errno(ret);
4491		goto out_commit;
4492	}
4493
4494out_commit:
4495	ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
4496	return ret;
4497}
4498
4499/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
4500static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
4501					 struct buffer_head *s_bh,
4502					 loff_t pos_in,
4503					 struct inode *t_inode,
4504					 struct buffer_head *t_bh,
4505					 loff_t pos_out,
4506					 loff_t len,
4507					 struct ocfs2_cached_dealloc_ctxt *dealloc)
4508{
4509	struct ocfs2_extent_tree s_et;
4510	struct ocfs2_extent_tree t_et;
4511	struct ocfs2_dinode *dis;
4512	struct buffer_head *ref_root_bh = NULL;
4513	struct ocfs2_refcount_tree *ref_tree;
4514	struct ocfs2_super *osb;
4515	loff_t remapped_bytes = 0;
4516	loff_t pstart, plen;
4517	u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
4518	unsigned int ext_flags;
4519	int ret = 0;
4520
4521	osb = OCFS2_SB(s_inode->i_sb);
4522	dis = (struct ocfs2_dinode *)s_bh->b_data;
4523	ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
4524	ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);
4525
4526	spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
4527	tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
4528	slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);
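	/*
	 * Editor's example of the rounding here, assuming 4 KB clusters:
	 * pos_in = 6000 gives spos = 6000 >> 12 = 1 (the start rounds
	 * down), while pos_in + len = 10000 gives slast =
	 * (10000 + 4095) >> 12 = 3 (the end rounds up), so clusters 1
	 * and 2 are remapped.
	 */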
4529
4530	while (spos < slast) {
4531		if (fatal_signal_pending(current)) {
4532			ret = -EINTR;
4533			goto out;
4534		}
4535
4536		/* Look up the extent. */
4537		ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
4538					 &num_clusters, &ext_flags);
4539		if (ret) {
4540			mlog_errno(ret);
4541			goto out;
4542		}
4543
4544		num_clusters = min_t(u32, num_clusters, slast - spos);
4545
4546		/* Punch out the dest range. */
4547		pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
4548		plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
4549		ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
4550		if (ret) {
4551			mlog_errno(ret);
4552			goto out;
4553		}
4554
4555		if (p_cluster == 0)
4556			goto next_loop;
4557
4558		/* Lock the refcount btree... */
4559		ret = ocfs2_lock_refcount_tree(osb,
4560					       le64_to_cpu(dis->i_refcount_loc),
4561					       1, &ref_tree, &ref_root_bh);
4562		if (ret) {
4563			mlog_errno(ret);
4564			goto out;
4565		}
4566
4567		/* Mark s_inode's extent as refcounted. */
4568		if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
4569			ret = ocfs2_add_refcount_flag(s_inode, &s_et,
4570						      &ref_tree->rf_ci,
4571						      ref_root_bh, spos,
4572						      p_cluster, num_clusters,
4573						      dealloc, NULL);
4574			if (ret) {
4575				mlog_errno(ret);
4576				goto out_unlock_refcount;
4577			}
4578		}
4579
4580		/* Map in the new extent. */
4581		ext_flags |= OCFS2_EXT_REFCOUNTED;
4582		ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
4583						  &ref_tree->rf_ci,
4584						  ref_root_bh,
4585						  tpos, p_cluster,
4586						  num_clusters,
4587						  ext_flags,
4588						  dealloc);
4589		if (ret) {
4590			mlog_errno(ret);
4591			goto out_unlock_refcount;
4592		}
4593
4594		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4595		brelse(ref_root_bh);
4596next_loop:
4597		spos += num_clusters;
4598		tpos += num_clusters;
4599		remapped_clus += num_clusters;
4600	}
4601
4602	goto out;
4603out_unlock_refcount:
4604	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4605	brelse(ref_root_bh);
4606out:
4607	remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
4608	remapped_bytes = min_t(loff_t, len, remapped_bytes);
4609
4610	return remapped_bytes > 0 ? remapped_bytes : ret;
4611}
4612
4613/* Set up refcount tree and remap s_inode to t_inode. */
4614loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
4615				  struct buffer_head *s_bh,
4616				  loff_t pos_in,
4617				  struct inode *t_inode,
4618				  struct buffer_head *t_bh,
4619				  loff_t pos_out,
4620				  loff_t len)
4621{
4622	struct ocfs2_cached_dealloc_ctxt dealloc;
4623	struct ocfs2_super *osb;
4624	struct ocfs2_dinode *dis;
4625	struct ocfs2_dinode *dit;
4626	loff_t ret;
4627
4628	osb = OCFS2_SB(s_inode->i_sb);
4629	dis = (struct ocfs2_dinode *)s_bh->b_data;
4630	dit = (struct ocfs2_dinode *)t_bh->b_data;
4631	ocfs2_init_dealloc_ctxt(&dealloc);
4632
4633	/*
4634	 * If we're reflinking the entire file and the source is inline
4635	 * data, just copy the contents.
4636	 */
4637	if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
4638	    i_size_read(t_inode) <= len &&
4639	    (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
4640		ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
4641		if (ret)
4642			mlog_errno(ret);
4643		goto out;
4644	}
4645
4646	/*
4647	 * If the two inodes belong to different refcount groups then
4648	 * forget it, because we don't know how (nor want) to merge
4649	 * refcount trees.
4650	 */
4651	ret = -EOPNOTSUPP;
4652	if (ocfs2_is_refcount_inode(s_inode) &&
4653	    ocfs2_is_refcount_inode(t_inode) &&
4654	    le64_to_cpu(dis->i_refcount_loc) !=
4655	    le64_to_cpu(dit->i_refcount_loc))
4656		goto out;
4657
4658	/* Neither inode has a refcount tree.  Add one to s_inode. */
4659	if (!ocfs2_is_refcount_inode(s_inode) &&
4660	    !ocfs2_is_refcount_inode(t_inode)) {
4661		ret = ocfs2_create_refcount_tree(s_inode, s_bh);
4662		if (ret) {
4663			mlog_errno(ret);
4664			goto out;
4665		}
4666	}
4667
4668	/* Ensure that both inodes end up with the same refcount tree. */
4669	if (!ocfs2_is_refcount_inode(s_inode)) {
4670		ret = ocfs2_set_refcount_tree(s_inode, s_bh,
4671					      le64_to_cpu(dit->i_refcount_loc));
4672		if (ret) {
4673			mlog_errno(ret);
4674			goto out;
4675		}
4676	}
4677	if (!ocfs2_is_refcount_inode(t_inode)) {
4678		ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4679					      le64_to_cpu(dis->i_refcount_loc));
4680		if (ret) {
4681			mlog_errno(ret);
4682			goto out;
4683		}
4684	}
4685
4686	/* Turn off inline data in the dest file. */
4687	if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4688		ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
4689		if (ret) {
4690			mlog_errno(ret);
4691			goto out;
4692		}
4693	}
4694
4695	/* Actually remap extents now. */
4696	ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
4697					 pos_out, len, &dealloc);
4698	if (ret < 0) {
4699		mlog_errno(ret);
4700		goto out;
4701	}
4702
4703out:
4704	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4705		ocfs2_schedule_truncate_log_flush(osb, 1);
4706		ocfs2_run_deallocs(osb, &dealloc);
4707	}
4708
4709	return ret;
4710}
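
/*
 * Editor's summary of the refcount-tree setup performed above:
 *
 *	source tree	target tree	action
 *	-----------	-----------	------
 *	none		none		create one on the source, share it
 *	none		T		source joins T
 *	S		none		target joins S
 *	S		T, S != T	fail with -EOPNOTSUPP
 */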
4711
4712/* Lock both inodes and grab a bh pointing to each inode. */
4713int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4714			      struct buffer_head **bh_s,
4715			      struct inode *t_inode,
4716			      struct buffer_head **bh_t)
4717{
4718	struct inode *inode1 = s_inode;
4719	struct inode *inode2 = t_inode;
4720	struct ocfs2_inode_info *oi1;
4721	struct ocfs2_inode_info *oi2;
4722	struct buffer_head *bh1 = NULL;
4723	struct buffer_head *bh2 = NULL;
4724	bool same_inode = (s_inode == t_inode);
4725	bool need_swap = (inode1->i_ino > inode2->i_ino);
4726	int status;
4727
4728	/* First grab the VFS and rw locks. */
4729	lock_two_nondirectories(s_inode, t_inode);
4730	if (need_swap)
4731		swap(inode1, inode2);
4732
4733	status = ocfs2_rw_lock(inode1, 1);
4734	if (status) {
4735		mlog_errno(status);
4736		goto out_i1;
4737	}
4738	if (!same_inode) {
4739		status = ocfs2_rw_lock(inode2, 1);
4740		if (status) {
4741			mlog_errno(status);
4742			goto out_i2;
4743		}
4744	}
4745
4746	/* Now go for the cluster locks */
4747	oi1 = OCFS2_I(inode1);
4748	oi2 = OCFS2_I(inode2);
4749
4750	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
4751				(unsigned long long)oi2->ip_blkno);
4752
4753	/* We always want to lock the one with the lower lockid first. */
4754	if (oi1->ip_blkno > oi2->ip_blkno)
4755		mlog_errno(-ENOLCK);
4756
4757	/* lock id1 */
4758	status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
4759					 OI_LS_REFLINK_TARGET);
4760	if (status < 0) {
4761		if (status != -ENOENT)
4762			mlog_errno(status);
4763		goto out_rw2;
4764	}
4765
4766	/* lock id2 */
4767	if (!same_inode) {
4768		status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
4769						 OI_LS_REFLINK_TARGET);
4770		if (status < 0) {
4771			if (status != -ENOENT)
4772				mlog_errno(status);
4773			goto out_cl1;
4774		}
4775	} else {
4776		bh2 = bh1;
4777	}
4778
4779	/*
4780	 * If we swapped inode order above, we have to swap the buffer heads
4781	 * before passing them back to the caller.
4782	 */
4783	if (need_swap)
4784		swap(bh1, bh2);
4785	*bh_s = bh1;
4786	*bh_t = bh2;
4787
4788	trace_ocfs2_double_lock_end(
4789			(unsigned long long)oi1->ip_blkno,
4790			(unsigned long long)oi2->ip_blkno);
4791
4792	return 0;
4793
4794out_cl1:
4795	ocfs2_inode_unlock(inode1, 1);
4796	brelse(bh1);
4797out_rw2:
4798	ocfs2_rw_unlock(inode2, 1);
4799out_i2:
4800	ocfs2_rw_unlock(inode1, 1);
4801out_i1:
4802	unlock_two_nondirectories(s_inode, t_inode);
4803	return status;
4804}
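
/*
 * Editor's sketch of the expected pairing (illustration only; the real
 * caller lives in the remap_file_range path):
 *
 *	ret = ocfs2_reflink_inodes_lock(src, &s_bh, dest, &t_bh);
 *	if (ret)
 *		return ret;
 *	... ocfs2_reflink_remap_blocks(src, s_bh, pos_in,
 *				       dest, t_bh, pos_out, len); ...
 *	ocfs2_reflink_inodes_unlock(src, s_bh, dest, t_bh);
 */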
4805
4806/* Unlock both inodes and release buffers. */
4807void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
4808				 struct buffer_head *s_bh,
4809				 struct inode *t_inode,
4810				 struct buffer_head *t_bh)
4811{
4812	ocfs2_inode_unlock(s_inode, 1);
4813	ocfs2_rw_unlock(s_inode, 1);
4814	brelse(s_bh);
4815	if (s_inode != t_inode) {
4816		ocfs2_inode_unlock(t_inode, 1);
4817		ocfs2_rw_unlock(t_inode, 1);
4818		brelse(t_bh);
4819	}
4820	unlock_two_nondirectories(s_inode, t_inode);
4821}
v3.15
 
   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * refcounttree.c
   5 *
   6 * Copyright (C) 2009 Oracle.  All rights reserved.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public
  10 * License version 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 */
  17
  18#include <linux/sort.h>
  19#include <cluster/masklog.h>
  20#include "ocfs2.h"
  21#include "inode.h"
  22#include "alloc.h"
  23#include "suballoc.h"
  24#include "journal.h"
  25#include "uptodate.h"
  26#include "super.h"
  27#include "buffer_head_io.h"
  28#include "blockcheck.h"
  29#include "refcounttree.h"
  30#include "sysfile.h"
  31#include "dlmglue.h"
  32#include "extent_map.h"
  33#include "aops.h"
  34#include "xattr.h"
  35#include "namei.h"
  36#include "ocfs2_trace.h"
 
  37
  38#include <linux/bio.h>
  39#include <linux/blkdev.h>
  40#include <linux/slab.h>
  41#include <linux/writeback.h>
  42#include <linux/pagevec.h>
  43#include <linux/swap.h>
  44#include <linux/security.h>
  45#include <linux/fsnotify.h>
  46#include <linux/quotaops.h>
  47#include <linux/namei.h>
  48#include <linux/mount.h>
  49#include <linux/posix_acl.h>
  50
  51struct ocfs2_cow_context {
  52	struct inode *inode;
  53	u32 cow_start;
  54	u32 cow_len;
  55	struct ocfs2_extent_tree data_et;
  56	struct ocfs2_refcount_tree *ref_tree;
  57	struct buffer_head *ref_root_bh;
  58	struct ocfs2_alloc_context *meta_ac;
  59	struct ocfs2_alloc_context *data_ac;
  60	struct ocfs2_cached_dealloc_ctxt dealloc;
  61	void *cow_object;
  62	struct ocfs2_post_refcount *post_refcount;
  63	int extra_credits;
  64	int (*get_clusters)(struct ocfs2_cow_context *context,
  65			    u32 v_cluster, u32 *p_cluster,
  66			    u32 *num_clusters,
  67			    unsigned int *extent_flags);
  68	int (*cow_duplicate_clusters)(handle_t *handle,
  69				      struct inode *inode,
  70				      u32 cpos, u32 old_cluster,
  71				      u32 new_cluster, u32 new_len);
  72};
  73
  74static inline struct ocfs2_refcount_tree *
  75cache_info_to_refcount(struct ocfs2_caching_info *ci)
  76{
  77	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  78}
  79
  80static int ocfs2_validate_refcount_block(struct super_block *sb,
  81					 struct buffer_head *bh)
  82{
  83	int rc;
  84	struct ocfs2_refcount_block *rb =
  85		(struct ocfs2_refcount_block *)bh->b_data;
  86
  87	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
  88
  89	BUG_ON(!buffer_uptodate(bh));
  90
  91	/*
  92	 * If the ecc fails, we return the error but otherwise
  93	 * leave the filesystem running.  We know any error is
  94	 * local to this block.
  95	 */
  96	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  97	if (rc) {
  98		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  99		     (unsigned long long)bh->b_blocknr);
 100		return rc;
 101	}
 102
 103
 104	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
 105		ocfs2_error(sb,
 106			    "Refcount block #%llu has bad signature %.*s",
 107			    (unsigned long long)bh->b_blocknr, 7,
 108			    rb->rf_signature);
 109		return -EINVAL;
 110	}
 111
 112	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
 113		ocfs2_error(sb,
 114			    "Refcount block #%llu has an invalid rf_blkno "
 115			    "of %llu",
 116			    (unsigned long long)bh->b_blocknr,
 117			    (unsigned long long)le64_to_cpu(rb->rf_blkno));
 118		return -EINVAL;
 119	}
 120
 121	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
 122		ocfs2_error(sb,
 123			    "Refcount block #%llu has an invalid "
 124			    "rf_fs_generation of #%u",
 125			    (unsigned long long)bh->b_blocknr,
 126			    le32_to_cpu(rb->rf_fs_generation));
 127		return -EINVAL;
 128	}
 129
 130	return 0;
 131}
 132
 133static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
 134				     u64 rb_blkno,
 135				     struct buffer_head **bh)
 136{
 137	int rc;
 138	struct buffer_head *tmp = *bh;
 139
 140	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
 141			      ocfs2_validate_refcount_block);
 142
 143	/* If ocfs2_read_block() got us a new bh, pass it up. */
 144	if (!rc && !*bh)
 145		*bh = tmp;
 146
 147	return rc;
 148}
 149
 150static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
 151{
 152	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 153
 154	return rf->rf_blkno;
 155}
 156
 157static struct super_block *
 158ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
 159{
 160	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 161
 162	return rf->rf_sb;
 163}
 164
 165static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
 166{
 167	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 168
 169	spin_lock(&rf->rf_lock);
 170}
 171
 172static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
 173{
 174	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 175
 176	spin_unlock(&rf->rf_lock);
 177}
 178
 179static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
 180{
 181	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 182
 183	mutex_lock(&rf->rf_io_mutex);
 184}
 185
 186static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
 187{
 188	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
 189
 190	mutex_unlock(&rf->rf_io_mutex);
 191}
 192
 193static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
 194	.co_owner		= ocfs2_refcount_cache_owner,
 195	.co_get_super		= ocfs2_refcount_cache_get_super,
 196	.co_cache_lock		= ocfs2_refcount_cache_lock,
 197	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
 198	.co_io_lock		= ocfs2_refcount_cache_io_lock,
 199	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
 200};
 201
 202static struct ocfs2_refcount_tree *
 203ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
 204{
 205	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
 206	struct ocfs2_refcount_tree *tree = NULL;
 207
 208	while (n) {
 209		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
 210
 211		if (blkno < tree->rf_blkno)
 212			n = n->rb_left;
 213		else if (blkno > tree->rf_blkno)
 214			n = n->rb_right;
 215		else
 216			return tree;
 217	}
 218
 219	return NULL;
 220}
 221
 222/* osb_lock is already locked. */
 223static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
 224				       struct ocfs2_refcount_tree *new)
 225{
 226	u64 rf_blkno = new->rf_blkno;
 227	struct rb_node *parent = NULL;
 228	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
 229	struct ocfs2_refcount_tree *tmp;
 230
 231	while (*p) {
 232		parent = *p;
 233
 234		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
 235			       rf_node);
 236
 237		if (rf_blkno < tmp->rf_blkno)
 238			p = &(*p)->rb_left;
 239		else if (rf_blkno > tmp->rf_blkno)
 240			p = &(*p)->rb_right;
 241		else {
 242			/* This should never happen! */
 243			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
 244			     (unsigned long long)rf_blkno);
 245			BUG();
 246		}
 247	}
 248
 249	rb_link_node(&new->rf_node, parent, p);
 250	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
 251}
 252
 253static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
 254{
 255	ocfs2_metadata_cache_exit(&tree->rf_ci);
 256	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
 257	ocfs2_lock_res_free(&tree->rf_lockres);
 258	kfree(tree);
 259}
 260
 261static inline void
 262ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
 263					struct ocfs2_refcount_tree *tree)
 264{
 265	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
 266	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
 267		osb->osb_ref_tree_lru = NULL;
 268}
 269
 270static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
 271					struct ocfs2_refcount_tree *tree)
 272{
 273	spin_lock(&osb->osb_lock);
 274	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 275	spin_unlock(&osb->osb_lock);
 276}
 277
 278static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
 279{
 280	struct ocfs2_refcount_tree *tree =
 281		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
 282
 283	ocfs2_free_refcount_tree(tree);
 284}
 285
 286static inline void
 287ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
 288{
 289	kref_get(&tree->rf_getcnt);
 290}
 291
 292static inline void
 293ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
 294{
 295	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
 296}
 297
 298static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
 299					       struct super_block *sb)
 300{
 301	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
 302	mutex_init(&new->rf_io_mutex);
 303	new->rf_sb = sb;
 304	spin_lock_init(&new->rf_lock);
 305}
 306
 307static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
 308					struct ocfs2_refcount_tree *new,
 309					u64 rf_blkno, u32 generation)
 310{
 311	init_rwsem(&new->rf_sem);
 312	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
 313				     rf_blkno, generation);
 314}
 315
 316static struct ocfs2_refcount_tree*
 317ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
 318{
 319	struct ocfs2_refcount_tree *new;
 320
 321	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
 322	if (!new)
 323		return NULL;
 324
 325	new->rf_blkno = rf_blkno;
 326	kref_init(&new->rf_getcnt);
 327	ocfs2_init_refcount_tree_ci(new, osb->sb);
 328
 329	return new;
 330}
 331
 332static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
 333				   struct ocfs2_refcount_tree **ret_tree)
 334{
 335	int ret = 0;
 336	struct ocfs2_refcount_tree *tree, *new = NULL;
 337	struct buffer_head *ref_root_bh = NULL;
 338	struct ocfs2_refcount_block *ref_rb;
 339
 340	spin_lock(&osb->osb_lock);
 341	if (osb->osb_ref_tree_lru &&
 342	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
 343		tree = osb->osb_ref_tree_lru;
 344	else
 345		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 346	if (tree)
 347		goto out;
 348
 349	spin_unlock(&osb->osb_lock);
 350
 351	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
 352	if (!new) {
 353		ret = -ENOMEM;
 354		mlog_errno(ret);
 355		return ret;
 356	}
 357	/*
 358	 * We need the generation to create the refcount tree lock and since
 359	 * it isn't changed during the tree modification, we are safe here to
 360	 * read without protection.
 361	 * We also have to purge the cache after we create the lock since the
 362	 * refcount block may have the stale data. It can only be trusted when
 363	 * we hold the refcount lock.
 364	 */
 365	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
 366	if (ret) {
 367		mlog_errno(ret);
 368		ocfs2_metadata_cache_exit(&new->rf_ci);
 369		kfree(new);
 370		return ret;
 371	}
 372
 373	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 374	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
 375	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
 376				      new->rf_generation);
 377	ocfs2_metadata_cache_purge(&new->rf_ci);
 378
 379	spin_lock(&osb->osb_lock);
 380	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 381	if (tree)
 382		goto out;
 383
 384	ocfs2_insert_refcount_tree(osb, new);
 385
 386	tree = new;
 387	new = NULL;
 388
 389out:
 390	*ret_tree = tree;
 391
 392	osb->osb_ref_tree_lru = tree;
 393
 394	spin_unlock(&osb->osb_lock);
 395
 396	if (new)
 397		ocfs2_free_refcount_tree(new);
 398
 399	brelse(ref_root_bh);
 400	return ret;
 401}
 402
 403static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
 404{
 405	int ret;
 406	struct buffer_head *di_bh = NULL;
 407	struct ocfs2_dinode *di;
 408
 409	ret = ocfs2_read_inode_block(inode, &di_bh);
 410	if (ret) {
 411		mlog_errno(ret);
 412		goto out;
 413	}
 414
 415	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
 416
 417	di = (struct ocfs2_dinode *)di_bh->b_data;
 418	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
 419	brelse(di_bh);
 420out:
 421	return ret;
 422}
 423
 424static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 425				      struct ocfs2_refcount_tree *tree, int rw)
 426{
 427	int ret;
 428
 429	ret = ocfs2_refcount_lock(tree, rw);
 430	if (ret) {
 431		mlog_errno(ret);
 432		goto out;
 433	}
 434
 435	if (rw)
 436		down_write(&tree->rf_sem);
 437	else
 438		down_read(&tree->rf_sem);
 439
 440out:
 441	return ret;
 442}
 443
 444/*
 445 * Lock the refcount tree pointed by ref_blkno and return the tree.
 446 * In most case, we lock the tree and read the refcount block.
 447 * So read it here if the caller really needs it.
 448 *
 449 * If the tree has been re-created by other node, it will free the
 450 * old one and re-create it.
 451 */
 452int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
 453			     u64 ref_blkno, int rw,
 454			     struct ocfs2_refcount_tree **ret_tree,
 455			     struct buffer_head **ref_bh)
 456{
 457	int ret, delete_tree = 0;
 458	struct ocfs2_refcount_tree *tree = NULL;
 459	struct buffer_head *ref_root_bh = NULL;
 460	struct ocfs2_refcount_block *rb;
 461
 462again:
 463	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
 464	if (ret) {
 465		mlog_errno(ret);
 466		return ret;
 467	}
 468
 469	ocfs2_refcount_tree_get(tree);
 470
 471	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
 472	if (ret) {
 473		mlog_errno(ret);
 474		ocfs2_refcount_tree_put(tree);
 475		goto out;
 476	}
 477
 478	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
 479					&ref_root_bh);
 480	if (ret) {
 481		mlog_errno(ret);
 482		ocfs2_unlock_refcount_tree(osb, tree, rw);
 483		ocfs2_refcount_tree_put(tree);
 484		goto out;
 485	}
 486
 487	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 488	/*
 489	 * If the refcount block has been freed and re-created, we may need
 490	 * to recreate the refcount tree also.
 491	 *
 492	 * Here we just remove the tree from the rb-tree, and the last
 493	 * kref holder will unlock and delete this refcount_tree.
 494	 * Then we goto "again" and ocfs2_get_refcount_tree will create
 495	 * the new refcount tree for us.
 496	 */
 497	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
 498		if (!tree->rf_removed) {
 499			ocfs2_erase_refcount_tree_from_list(osb, tree);
 500			tree->rf_removed = 1;
 501			delete_tree = 1;
 502		}
 503
 504		ocfs2_unlock_refcount_tree(osb, tree, rw);
 505		/*
 506		 * We get an extra reference when we create the refcount
 507		 * tree, so another put will destroy it.
 508		 */
 509		if (delete_tree)
 510			ocfs2_refcount_tree_put(tree);
 511		brelse(ref_root_bh);
 512		ref_root_bh = NULL;
 513		goto again;
 514	}
 515
 516	*ret_tree = tree;
 517	if (ref_bh) {
 518		*ref_bh = ref_root_bh;
 519		ref_root_bh = NULL;
 520	}
 521out:
 522	brelse(ref_root_bh);
 523	return ret;
 524}
 525
 526void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
 527				struct ocfs2_refcount_tree *tree, int rw)
 528{
 529	if (rw)
 530		up_write(&tree->rf_sem);
 531	else
 532		up_read(&tree->rf_sem);
 533
 534	ocfs2_refcount_unlock(tree, rw);
 535	ocfs2_refcount_tree_put(tree);
 536}
 537
 538void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
 539{
 540	struct rb_node *node;
 541	struct ocfs2_refcount_tree *tree;
 542	struct rb_root *root = &osb->osb_rf_lock_tree;
 543
 544	while ((node = rb_last(root)) != NULL) {
 545		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
 546
 547		trace_ocfs2_purge_refcount_trees(
 548				(unsigned long long) tree->rf_blkno);
 549
 550		rb_erase(&tree->rf_node, root);
 551		ocfs2_free_refcount_tree(tree);
 552	}
 553}
 554
 555/*
 556 * Create a refcount tree for an inode.
 557 * We take for granted that the inode is already locked.
 558 */
 559static int ocfs2_create_refcount_tree(struct inode *inode,
 560				      struct buffer_head *di_bh)
 561{
 562	int ret;
 563	handle_t *handle = NULL;
 564	struct ocfs2_alloc_context *meta_ac = NULL;
 565	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 566	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 567	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 568	struct buffer_head *new_bh = NULL;
 569	struct ocfs2_refcount_block *rb;
 570	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
 571	u16 suballoc_bit_start;
 572	u32 num_got;
 573	u64 suballoc_loc, first_blkno;
 574
 575	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
 576
 577	trace_ocfs2_create_refcount_tree(
 578		(unsigned long long)OCFS2_I(inode)->ip_blkno);
 579
 580	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
 581	if (ret) {
 582		mlog_errno(ret);
 583		goto out;
 584	}
 585
 586	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
 587	if (IS_ERR(handle)) {
 588		ret = PTR_ERR(handle);
 589		mlog_errno(ret);
 590		goto out;
 591	}
 592
 593	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 594				      OCFS2_JOURNAL_ACCESS_WRITE);
 595	if (ret) {
 596		mlog_errno(ret);
 597		goto out_commit;
 598	}
 599
 600	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
 601				   &suballoc_bit_start, &num_got,
 602				   &first_blkno);
 603	if (ret) {
 604		mlog_errno(ret);
 605		goto out_commit;
 606	}
 607
 608	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
 609	if (!new_tree) {
 610		ret = -ENOMEM;
 611		mlog_errno(ret);
 612		goto out_commit;
 613	}
 614
 615	new_bh = sb_getblk(inode->i_sb, first_blkno);
 616	if (!new_bh) {
 617		ret = -ENOMEM;
 618		mlog_errno(ret);
 619		goto out_commit;
 620	}
 621	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
 622
 623	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
 624				      OCFS2_JOURNAL_ACCESS_CREATE);
 625	if (ret) {
 626		mlog_errno(ret);
 627		goto out_commit;
 628	}
 629
 630	/* Initialize ocfs2_refcount_block. */
 631	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
 632	memset(rb, 0, inode->i_sb->s_blocksize);
 633	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
 634	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 635	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
 636	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 637	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
 638	rb->rf_blkno = cpu_to_le64(first_blkno);
 639	rb->rf_count = cpu_to_le32(1);
 640	rb->rf_records.rl_count =
 641			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
 642	spin_lock(&osb->osb_lock);
 643	rb->rf_generation = osb->s_next_generation++;
 644	spin_unlock(&osb->osb_lock);
 645
 646	ocfs2_journal_dirty(handle, new_bh);
 647
 648	spin_lock(&oi->ip_lock);
 649	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 650	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 651	di->i_refcount_loc = cpu_to_le64(first_blkno);
 652	spin_unlock(&oi->ip_lock);
 653
 654	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
 655
 656	ocfs2_journal_dirty(handle, di_bh);
 657
 658	/*
 659	 * We have to init the tree lock here since it will use
 660	 * the generation number to create it.
 661	 */
 662	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
 663	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
 664				      new_tree->rf_generation);
 665
 666	spin_lock(&osb->osb_lock);
 667	tree = ocfs2_find_refcount_tree(osb, first_blkno);
 668
 669	/*
 670	 * We've just created a new refcount tree in this block.  If
 671	 * we found a refcount tree on the ocfs2_super, it must be
 672	 * one we just deleted.  We free the old tree before
 673	 * inserting the new tree.
 674	 */
 675	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
 676	if (tree)
 677		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
 678	ocfs2_insert_refcount_tree(osb, new_tree);
 679	spin_unlock(&osb->osb_lock);
 680	new_tree = NULL;
 681	if (tree)
 682		ocfs2_refcount_tree_put(tree);
 683
 684out_commit:
 685	ocfs2_commit_trans(osb, handle);
 686
 687out:
 688	if (new_tree) {
 689		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
 690		kfree(new_tree);
 691	}
 692
 693	brelse(new_bh);
 694	if (meta_ac)
 695		ocfs2_free_alloc_context(meta_ac);
 696
 697	return ret;
 698}
 699
 700static int ocfs2_set_refcount_tree(struct inode *inode,
 701				   struct buffer_head *di_bh,
 702				   u64 refcount_loc)
 703{
 704	int ret;
 705	handle_t *handle = NULL;
 706	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 707	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 708	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 709	struct buffer_head *ref_root_bh = NULL;
 710	struct ocfs2_refcount_block *rb;
 711	struct ocfs2_refcount_tree *ref_tree;
 712
 713	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
 714
 715	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
 716				       &ref_tree, &ref_root_bh);
 717	if (ret) {
 718		mlog_errno(ret);
 719		return ret;
 720	}
 721
 722	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
 723	if (IS_ERR(handle)) {
 724		ret = PTR_ERR(handle);
 725		mlog_errno(ret);
 726		goto out;
 727	}
 728
 729	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 730				      OCFS2_JOURNAL_ACCESS_WRITE);
 731	if (ret) {
 732		mlog_errno(ret);
 733		goto out_commit;
 734	}
 735
 736	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
 737				      OCFS2_JOURNAL_ACCESS_WRITE);
 738	if (ret) {
 739		mlog_errno(ret);
 740		goto out_commit;
 741	}
 742
 743	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
 744	le32_add_cpu(&rb->rf_count, 1);
 745
 746	ocfs2_journal_dirty(handle, ref_root_bh);
 747
 748	spin_lock(&oi->ip_lock);
 749	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
 750	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 751	di->i_refcount_loc = cpu_to_le64(refcount_loc);
 752	spin_unlock(&oi->ip_lock);
 753	ocfs2_journal_dirty(handle, di_bh);
 754
 755out_commit:
 756	ocfs2_commit_trans(osb, handle);
 757out:
 758	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 759	brelse(ref_root_bh);
 760
 761	return ret;
 762}
 763
 764int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
 765{
 766	int ret, delete_tree = 0;
 767	handle_t *handle = NULL;
 768	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 769	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 770	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 771	struct ocfs2_refcount_block *rb;
 772	struct inode *alloc_inode = NULL;
 773	struct buffer_head *alloc_bh = NULL;
 774	struct buffer_head *blk_bh = NULL;
 775	struct ocfs2_refcount_tree *ref_tree;
 776	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
 777	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
 778	u16 bit = 0;
 779
 780	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
 781		return 0;
 782
 783	BUG_ON(!ref_blkno);
 784	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
 785	if (ret) {
 786		mlog_errno(ret);
 787		return ret;
 788	}
 789
 790	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
 791
 792	/*
 793	 * If we are the last user, we need to free the block.
 794	 * So lock the allocator ahead.
 795	 */
 796	if (le32_to_cpu(rb->rf_count) == 1) {
 797		blk = le64_to_cpu(rb->rf_blkno);
 798		bit = le16_to_cpu(rb->rf_suballoc_bit);
 799		if (rb->rf_suballoc_loc)
 800			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
 801		else
 802			bg_blkno = ocfs2_which_suballoc_group(blk, bit);
 803
 804		alloc_inode = ocfs2_get_system_file_inode(osb,
 805					EXTENT_ALLOC_SYSTEM_INODE,
 806					le16_to_cpu(rb->rf_suballoc_slot));
 807		if (!alloc_inode) {
 808			ret = -ENOMEM;
 809			mlog_errno(ret);
 810			goto out;
 811		}
 812		mutex_lock(&alloc_inode->i_mutex);
 813
 814		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
 815		if (ret) {
 816			mlog_errno(ret);
 817			goto out_mutex;
 818		}
 819
 820		credits += OCFS2_SUBALLOC_FREE;
 821	}
 822
 823	handle = ocfs2_start_trans(osb, credits);
 824	if (IS_ERR(handle)) {
 825		ret = PTR_ERR(handle);
 826		mlog_errno(ret);
 827		goto out_unlock;
 828	}
 829
 830	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 831				      OCFS2_JOURNAL_ACCESS_WRITE);
 832	if (ret) {
 833		mlog_errno(ret);
 834		goto out_commit;
 835	}
 836
 837	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
 838				      OCFS2_JOURNAL_ACCESS_WRITE);
 839	if (ret) {
 840		mlog_errno(ret);
 841		goto out_commit;
 842	}
 843
 844	spin_lock(&oi->ip_lock);
 845	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
 846	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
 847	di->i_refcount_loc = 0;
 848	spin_unlock(&oi->ip_lock);
 849	ocfs2_journal_dirty(handle, di_bh);
 850
 851	le32_add_cpu(&rb->rf_count , -1);
 852	ocfs2_journal_dirty(handle, blk_bh);
 853
 854	if (!rb->rf_count) {
 855		delete_tree = 1;
 856		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
 857		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
 858					       alloc_bh, bit, bg_blkno, 1);
 859		if (ret)
 860			mlog_errno(ret);
 861	}
 862
 863out_commit:
 864	ocfs2_commit_trans(osb, handle);
 865out_unlock:
 866	if (alloc_inode) {
 867		ocfs2_inode_unlock(alloc_inode, 1);
 868		brelse(alloc_bh);
 869	}
 870out_mutex:
 871	if (alloc_inode) {
 872		mutex_unlock(&alloc_inode->i_mutex);
 873		iput(alloc_inode);
 874	}
 875out:
 876	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 877	if (delete_tree)
 878		ocfs2_refcount_tree_put(ref_tree);
 879	brelse(blk_bh);
 880
 881	return ret;
 882}
 883
 884static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
 885					  struct buffer_head *ref_leaf_bh,
 886					  u64 cpos, unsigned int len,
 887					  struct ocfs2_refcount_rec *ret_rec,
 888					  int *index)
 889{
 890	int i = 0;
 891	struct ocfs2_refcount_block *rb =
 892		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
 893	struct ocfs2_refcount_rec *rec = NULL;
 894
 895	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
 896		rec = &rb->rf_records.rl_recs[i];
 897
 898		if (le64_to_cpu(rec->r_cpos) +
 899		    le32_to_cpu(rec->r_clusters) <= cpos)
 900			continue;
 901		else if (le64_to_cpu(rec->r_cpos) > cpos)
 902			break;
 903
 904		/* ok, cpos fail in this rec. Just return. */
 905		if (ret_rec)
 906			*ret_rec = *rec;
 907		goto out;
 908	}
 909
 910	if (ret_rec) {
 911		/* We meet with a hole here, so fake the rec. */
 912		ret_rec->r_cpos = cpu_to_le64(cpos);
 913		ret_rec->r_refcount = 0;
 914		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
 915		    le64_to_cpu(rec->r_cpos) < cpos + len)
 916			ret_rec->r_clusters =
 917				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
 918		else
 919			ret_rec->r_clusters = cpu_to_le32(len);
 920	}
 921
 922out:
 923	*index = i;
 924}
 925
 926/*
 927 * Try to remove refcount tree. The mechanism is:
 928 * 1) Check whether i_clusters == 0, if no, exit.
 929 * 2) check whether we have i_xattr_loc in dinode. if yes, exit.
 930 * 3) Check whether we have inline xattr stored outside, if yes, exit.
 931 * 4) Remove the tree.
 932 */
 933int ocfs2_try_remove_refcount_tree(struct inode *inode,
 934				   struct buffer_head *di_bh)
 935{
 936	int ret;
 937	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 938	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 939
 940	down_write(&oi->ip_xattr_sem);
 941	down_write(&oi->ip_alloc_sem);
 942
 943	if (oi->ip_clusters)
 944		goto out;
 945
 946	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
 947		goto out;
 948
 949	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
 950	    ocfs2_has_inline_xattr_value_outside(inode, di))
 951		goto out;
 952
 953	ret = ocfs2_remove_refcount_tree(inode, di_bh);
 954	if (ret)
 955		mlog_errno(ret);
 956out:
 957	up_write(&oi->ip_alloc_sem);
 958	up_write(&oi->ip_xattr_sem);
 959	return 0;
 960}
 961
 962/*
 963 * Find the end range for a leaf refcount block indicated by
 964 * el->l_recs[index].e_blkno.
 965 */
 966static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
 967				       struct buffer_head *ref_root_bh,
 968				       struct ocfs2_extent_block *eb,
 969				       struct ocfs2_extent_list *el,
 970				       int index,  u32 *cpos_end)
 971{
 972	int ret, i, subtree_root;
 973	u32 cpos;
 974	u64 blkno;
 975	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
 976	struct ocfs2_path *left_path = NULL, *right_path = NULL;
 977	struct ocfs2_extent_tree et;
 978	struct ocfs2_extent_list *tmp_el;
 979
 980	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
 981		/*
 982		 * We have an extent rec after index, so just use the e_cpos
 983		 * of the next extent rec.
 984		 */
 985		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
 986		return 0;
 987	}
 988
 989	if (!eb || !eb->h_next_leaf_blk) {
 990		/*
 991		 * We are the last extent rec, so any higher cpos should
 992		 * be stored in this leaf refcount block.
 993		 */
 994		*cpos_end = UINT_MAX;
 995		return 0;
 996	}
 997
 998	/*
 999	 * If the extent block isn't the last one, we have to find
1000	 * the subtree root between this extent block and the next
1001	 * leaf extent block and get the corresponding e_cpos from
1002	 * the subtree root. Otherwise we may corrupt the b-tree.
1003	 */
1004	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1005
1006	left_path = ocfs2_new_path_from_et(&et);
1007	if (!left_path) {
1008		ret = -ENOMEM;
1009		mlog_errno(ret);
1010		goto out;
1011	}
1012
1013	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
1014	ret = ocfs2_find_path(ci, left_path, cpos);
1015	if (ret) {
1016		mlog_errno(ret);
1017		goto out;
1018	}
1019
1020	right_path = ocfs2_new_path_from_path(left_path);
1021	if (!right_path) {
1022		ret = -ENOMEM;
1023		mlog_errno(ret);
1024		goto out;
1025	}
1026
1027	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
1028	if (ret) {
1029		mlog_errno(ret);
1030		goto out;
1031	}
1032
1033	ret = ocfs2_find_path(ci, right_path, cpos);
1034	if (ret) {
1035		mlog_errno(ret);
1036		goto out;
1037	}
1038
1039	subtree_root = ocfs2_find_subtree_root(&et, left_path,
1040					       right_path);
1041
1042	tmp_el = left_path->p_node[subtree_root].el;
1043	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
1044	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
1045		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
1046			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
1047			break;
1048		}
1049	}
1050
1051	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
1052
1053out:
1054	ocfs2_free_path(left_path);
1055	ocfs2_free_path(right_path);
1056	return ret;
1057}
1058
1059/*
1060 * Given a cpos and len, try to find the refcount record which contains cpos.
1061 * 1. If cpos can be found in one refcount record, return the record.
1062 * 2. If cpos can't be found, return a fake record which starts at cpos
1063 *    and ends at a value between cpos+len and the start of the next record.
1064 *    This fake record has r_refcount = 0.
1065 */
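/*
 * A hypothetical example (values invented for illustration): with leaf
 * records (r_cpos 0, r_clusters 5, r_refcount 2) and (r_cpos 10,
 * r_clusters 5, r_refcount 1), a lookup of cpos 5, len 8 falls into the
 * hole, so the fake record returned is (r_cpos 5, r_clusters 5,
 * r_refcount 0), clipped at the start of the record at cpos 10.
 */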
1066static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
1067				  struct buffer_head *ref_root_bh,
1068				  u64 cpos, unsigned int len,
1069				  struct ocfs2_refcount_rec *ret_rec,
1070				  int *index,
1071				  struct buffer_head **ret_bh)
1072{
1073	int ret = 0, i, found;
1074	u32 low_cpos, uninitialized_var(cpos_end);
1075	struct ocfs2_extent_list *el;
1076	struct ocfs2_extent_rec *rec = NULL;
1077	struct ocfs2_extent_block *eb = NULL;
1078	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
1079	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1080	struct ocfs2_refcount_block *rb =
1081			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1082
1083	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
1084		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
1085					      ret_rec, index);
1086		*ret_bh = ref_root_bh;
1087		get_bh(ref_root_bh);
1088		return 0;
1089	}
1090
1091	el = &rb->rf_list;
1092	low_cpos = cpos & OCFS2_32BIT_POS_MASK;
1093
1094	if (el->l_tree_depth) {
1095		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
1096		if (ret) {
1097			mlog_errno(ret);
1098			goto out;
1099		}
1100
1101		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
1102		el = &eb->h_list;
1103
1104		if (el->l_tree_depth) {
1105			ocfs2_error(sb,
1106			"refcount tree %llu has non zero tree depth in leaf btree block %llu\n",
1107			(unsigned long long)ocfs2_metadata_cache_owner(ci),
1108			(unsigned long long)eb_bh->b_blocknr);
1110			ret = -EROFS;
1111			goto out;
1112		}
1113	}
1114
1115	found = 0;
1116	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1117		rec = &el->l_recs[i];
1118
1119		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
1120			found = 1;
1121			break;
1122		}
1123	}
1124
1125	if (found) {
1126		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
1127						  eb, el, i, &cpos_end);
1128		if (ret) {
1129			mlog_errno(ret);
1130			goto out;
1131		}
1132
1133		if (cpos_end < low_cpos + len)
1134			len = cpos_end - low_cpos;
1135	}
1136
1137	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
1138					&ref_leaf_bh);
1139	if (ret) {
1140		mlog_errno(ret);
1141		goto out;
1142	}
1143
1144	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1145				      ret_rec, index);
1146	*ret_bh = ref_leaf_bh;
1147out:
1148	brelse(eb_bh);
1149	return ret;
1150}
1151
1152enum ocfs2_ref_rec_contig {
1153	REF_CONTIG_NONE = 0,
1154	REF_CONTIG_LEFT,
1155	REF_CONTIG_RIGHT,
1156	REF_CONTIG_LEFTRIGHT,
1157};
1158
1159static enum ocfs2_ref_rec_contig
1160	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
1161				    int index)
1162{
1163	if ((rb->rf_records.rl_recs[index].r_refcount ==
1164	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
1165	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
1166	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
1167	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
1168		return REF_CONTIG_RIGHT;
1169
1170	return REF_CONTIG_NONE;
1171}
1172
1173static enum ocfs2_ref_rec_contig
1174	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
1175				  int index)
1176{
1177	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
1178
1179	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
1180		ret = ocfs2_refcount_rec_adjacent(rb, index);
1181
1182	if (index > 0) {
1183		enum ocfs2_ref_rec_contig tmp;
1184
1185		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
1186
1187		if (tmp == REF_CONTIG_RIGHT) {
1188			if (ret == REF_CONTIG_RIGHT)
1189				ret = REF_CONTIG_LEFTRIGHT;
1190			else
1191				ret = REF_CONTIG_LEFT;
1192		}
1193	}
1194
1195	return ret;
1196}
1197
1198static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
1199					   int index)
1200{
1201	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
1202	       rb->rf_records.rl_recs[index+1].r_refcount);
1203
1204	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
1205		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
1206
1207	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
1208		memmove(&rb->rf_records.rl_recs[index + 1],
1209			&rb->rf_records.rl_recs[index + 2],
1210			sizeof(struct ocfs2_refcount_rec) *
1211			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
1212
1213	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
1214	       0, sizeof(struct ocfs2_refcount_rec));
1215	le16_add_cpu(&rb->rf_records.rl_used, -1);
1216}
1217
1218/*
1219 * Merge the refcount rec if we are contiguous with the adjacent recs.
1220 */
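/*
 * For illustration (made-up numbers): records (cpos 0, clusters 2,
 * refcount 1) and (cpos 2, clusters 3, refcount 1) are contiguous with
 * equal refcounts, so merging at index 0 collapses them into a single
 * record (cpos 0, clusters 5, refcount 1) and rl_used drops by one.
 */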
1221static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
1222				     int index)
1223{
1224	enum ocfs2_ref_rec_contig contig =
1225				ocfs2_refcount_rec_contig(rb, index);
1226
1227	if (contig == REF_CONTIG_NONE)
1228		return;
1229
1230	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
1231		BUG_ON(index == 0);
1232		index--;
1233	}
1234
1235	ocfs2_rotate_refcount_rec_left(rb, index);
1236
1237	if (contig == REF_CONTIG_LEFTRIGHT)
1238		ocfs2_rotate_refcount_rec_left(rb, index);
1239}
1240
1241/*
1242 * Change the refcount indexed by "index" in ref_leaf_bh.
1243 * If refcount reaches 0, remove it.
1244 */
1245static int ocfs2_change_refcount_rec(handle_t *handle,
1246				     struct ocfs2_caching_info *ci,
1247				     struct buffer_head *ref_leaf_bh,
1248				     int index, int merge, int change)
1249{
1250	int ret;
1251	struct ocfs2_refcount_block *rb =
1252			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1253	struct ocfs2_refcount_list *rl = &rb->rf_records;
1254	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
1255
1256	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1257				      OCFS2_JOURNAL_ACCESS_WRITE);
1258	if (ret) {
1259		mlog_errno(ret);
1260		goto out;
1261	}
1262
1263	trace_ocfs2_change_refcount_rec(
1264		(unsigned long long)ocfs2_metadata_cache_owner(ci),
1265		index, le32_to_cpu(rec->r_refcount), change);
1266	le32_add_cpu(&rec->r_refcount, change);
1267
1268	if (!rec->r_refcount) {
1269		if (index != le16_to_cpu(rl->rl_used) - 1) {
1270			memmove(rec, rec + 1,
1271				(le16_to_cpu(rl->rl_used) - index - 1) *
1272				sizeof(struct ocfs2_refcount_rec));
1273			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
1274			       0, sizeof(struct ocfs2_refcount_rec));
1275		}
1276
1277		le16_add_cpu(&rl->rl_used, -1);
1278	} else if (merge)
1279		ocfs2_refcount_rec_merge(rb, index);
1280
1281	ocfs2_journal_dirty(handle, ref_leaf_bh);
1282out:
1283	return ret;
1284}
1285
1286static int ocfs2_expand_inline_ref_root(handle_t *handle,
1287					struct ocfs2_caching_info *ci,
1288					struct buffer_head *ref_root_bh,
1289					struct buffer_head **ref_leaf_bh,
1290					struct ocfs2_alloc_context *meta_ac)
1291{
1292	int ret;
1293	u16 suballoc_bit_start;
1294	u32 num_got;
1295	u64 suballoc_loc, blkno;
1296	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1297	struct buffer_head *new_bh = NULL;
1298	struct ocfs2_refcount_block *new_rb;
1299	struct ocfs2_refcount_block *root_rb =
1300			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1301
1302	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1303				      OCFS2_JOURNAL_ACCESS_WRITE);
1304	if (ret) {
1305		mlog_errno(ret);
1306		goto out;
1307	}
1308
1309	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1310				   &suballoc_bit_start, &num_got,
1311				   &blkno);
1312	if (ret) {
1313		mlog_errno(ret);
1314		goto out;
1315	}
1316
1317	new_bh = sb_getblk(sb, blkno);
1318	if (new_bh == NULL) {
1319		ret = -ENOMEM;
1320		mlog_errno(ret);
1321		goto out;
1322	}
1323	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1324
1325	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1326				      OCFS2_JOURNAL_ACCESS_CREATE);
1327	if (ret) {
1328		mlog_errno(ret);
1329		goto out;
1330	}
1331
1332	/*
1333	 * Initialize ocfs2_refcount_block.
1334	 * It should contain the same information as the old root,
1335	 * so just memcpy it and change the corresponding fields.
1336	 */
1337	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
1338
1339	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1340	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1341	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1342	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1343	new_rb->rf_blkno = cpu_to_le64(blkno);
1344	new_rb->rf_cpos = cpu_to_le32(0);
1345	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1346	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1347	ocfs2_journal_dirty(handle, new_bh);
1348
1349	/* Now change the root. */
1350	memset(&root_rb->rf_list, 0, sb->s_blocksize -
1351	       offsetof(struct ocfs2_refcount_block, rf_list));
1352	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
1353	root_rb->rf_clusters = cpu_to_le32(1);
1354	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
1355	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
1356	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
1357	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
1358
1359	ocfs2_journal_dirty(handle, ref_root_bh);
1360
1361	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
1362		le16_to_cpu(new_rb->rf_records.rl_used));
1363
1364	*ref_leaf_bh = new_bh;
1365	new_bh = NULL;
1366out:
1367	brelse(new_bh);
1368	return ret;
1369}
1370
1371static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
1372					   struct ocfs2_refcount_rec *next)
1373{
1374	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
1375		ocfs2_get_ref_rec_low_cpos(next))
1376		return 1;
1377
1378	return 0;
1379}
1380
1381static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
1382{
1383	const struct ocfs2_refcount_rec *l = a, *r = b;
1384	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
1385	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
1386
1387	if (l_cpos > r_cpos)
1388		return 1;
1389	if (l_cpos < r_cpos)
1390		return -1;
1391	return 0;
1392}
1393
1394static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
1395{
1396	const struct ocfs2_refcount_rec *l = a, *r = b;
1397	u64 l_cpos = le64_to_cpu(l->r_cpos);
1398	u64 r_cpos = le64_to_cpu(r->r_cpos);
1399
1400	if (l_cpos > r_cpos)
1401		return 1;
1402	if (l_cpos < r_cpos)
1403		return -1;
1404	return 0;
1405}
1406
1407static void swap_refcount_rec(void *a, void *b, int size)
1408{
1409	struct ocfs2_refcount_rec *l = a, *r = b, tmp;
1410
1411	tmp = *(struct ocfs2_refcount_rec *)l;
1412	*(struct ocfs2_refcount_rec *)l =
1413			*(struct ocfs2_refcount_rec *)r;
1414	*(struct ocfs2_refcount_rec *)r = tmp;
1415}
1416
1417/*
1418 * The refcount recs are ordered by their 64-bit cpos,
1419 * but we will use the low 32 bits as the e_cpos in the b-tree.
1420 * So we need to make sure that this pos doesn't intersect with others.
1421 *
1422 * Note: The refcount block is already sorted by low 32-bit cpos,
1423 *       so just try the middle pos first; we will exit as soon as we
1424 *       find a good position.
1425 */
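/*
 * An illustrative sketch (invented values): with rl_used = 6 and
 * (low cpos, clusters) pairs (0,2) (2,1) (3,2) (5,1) (6,2) (8,1),
 * middle = 3; at delta = 0, recs[2] and recs[3] don't intersect
 * (3 + 2 <= 5), so *split_index = 3 and *split_pos = 5.
 */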
1426static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
1427					 u32 *split_pos, int *split_index)
1428{
1429	int num_used = le16_to_cpu(rl->rl_used);
1430	int delta, middle = num_used / 2;
1431
1432	for (delta = 0; delta < middle; delta++) {
1433		/* Let's check delta earlier than middle */
1434		if (ocfs2_refcount_rec_no_intersect(
1435					&rl->rl_recs[middle - delta - 1],
1436					&rl->rl_recs[middle - delta])) {
1437			*split_index = middle - delta;
1438			break;
1439		}
1440
1441		/* For even counts, don't walk off the end */
1442		if ((middle + delta + 1) == num_used)
1443			continue;
1444
1445		/* Now try delta past middle */
1446		if (ocfs2_refcount_rec_no_intersect(
1447					&rl->rl_recs[middle + delta],
1448					&rl->rl_recs[middle + delta + 1])) {
1449			*split_index = middle + delta + 1;
1450			break;
1451		}
1452	}
1453
1454	if (delta >= middle)
1455		return -ENOSPC;
1456
1457	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
1458	return 0;
1459}
1460
1461static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1462					    struct buffer_head *new_bh,
1463					    u32 *split_cpos)
1464{
1465	int split_index = 0, num_moved, ret;
1466	u32 cpos = 0;
1467	struct ocfs2_refcount_block *rb =
1468			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1469	struct ocfs2_refcount_list *rl = &rb->rf_records;
1470	struct ocfs2_refcount_block *new_rb =
1471			(struct ocfs2_refcount_block *)new_bh->b_data;
1472	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1473
1474	trace_ocfs2_divide_leaf_refcount_block(
1475		(unsigned long long)ref_leaf_bh->b_blocknr,
1476		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
1477
1478	/*
1479	 * XXX: Improvement later.
1480	 * If we know all the high 32-bit cpos are the same, there is no need to sort.
1481	 *
1482	 * In order to make the whole process safe, we do:
1483	 * 1. sort the entries by their low 32 bit cpos first so that we can
1484	 *    find the split cpos easily.
1485	 * 2. call ocfs2_insert_extent to insert the new refcount block.
1486	 * 3. move the refcount rec to the new block.
1487	 * 4. sort the entries by their 64 bit cpos.
1488	 * 5. dirty the new_rb and rb.
1489	 */
1490	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1491	     sizeof(struct ocfs2_refcount_rec),
1492	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
1493
1494	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1495	if (ret) {
1496		mlog_errno(ret);
1497		return ret;
1498	}
1499
1500	new_rb->rf_cpos = cpu_to_le32(cpos);
1501
1502	/* move refcount records starting from split_index to the new block. */
1503	num_moved = le16_to_cpu(rl->rl_used) - split_index;
1504	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
1505	       num_moved * sizeof(struct ocfs2_refcount_rec));
1506
1507	/* ok, remove the entries we just moved over to the other block. */
1508	memset(&rl->rl_recs[split_index], 0,
1509	       num_moved * sizeof(struct ocfs2_refcount_rec));
1510
1511	/* change old and new rl_used accordingly. */
1512	le16_add_cpu(&rl->rl_used, -num_moved);
1513	new_rl->rl_used = cpu_to_le16(num_moved);
1514
1515	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1516	     sizeof(struct ocfs2_refcount_rec),
1517	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1518
1519	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
1520	     sizeof(struct ocfs2_refcount_rec),
1521	     cmp_refcount_rec_by_cpos, swap_refcount_rec);
1522
1523	*split_cpos = cpos;
1524	return 0;
1525}
1526
1527static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1528					 struct ocfs2_caching_info *ci,
1529					 struct buffer_head *ref_root_bh,
1530					 struct buffer_head *ref_leaf_bh,
1531					 struct ocfs2_alloc_context *meta_ac)
1532{
1533	int ret;
1534	u16 suballoc_bit_start;
1535	u32 num_got, new_cpos;
1536	u64 suballoc_loc, blkno;
1537	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1538	struct ocfs2_refcount_block *root_rb =
1539			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1540	struct buffer_head *new_bh = NULL;
1541	struct ocfs2_refcount_block *new_rb;
1542	struct ocfs2_extent_tree ref_et;
1543
1544	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
1545
1546	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1547				      OCFS2_JOURNAL_ACCESS_WRITE);
1548	if (ret) {
1549		mlog_errno(ret);
1550		goto out;
1551	}
1552
1553	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1554				      OCFS2_JOURNAL_ACCESS_WRITE);
1555	if (ret) {
1556		mlog_errno(ret);
1557		goto out;
1558	}
1559
1560	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
1561				   &suballoc_bit_start, &num_got,
1562				   &blkno);
1563	if (ret) {
1564		mlog_errno(ret);
1565		goto out;
1566	}
1567
1568	new_bh = sb_getblk(sb, blkno);
1569	if (new_bh == NULL) {
1570		ret = -ENOMEM;
1571		mlog_errno(ret);
1572		goto out;
1573	}
1574	ocfs2_set_new_buffer_uptodate(ci, new_bh);
1575
1576	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1577				      OCFS2_JOURNAL_ACCESS_CREATE);
1578	if (ret) {
1579		mlog_errno(ret);
1580		goto out;
1581	}
1582
1583	/* Initialize ocfs2_refcount_block. */
1584	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1585	memset(new_rb, 0, sb->s_blocksize);
1586	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
1587	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
1588	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
1589	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1590	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
1591	new_rb->rf_blkno = cpu_to_le64(blkno);
1592	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1593	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1594	new_rb->rf_records.rl_count =
1595				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
1596	new_rb->rf_generation = root_rb->rf_generation;
1597
1598	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
1599	if (ret) {
1600		mlog_errno(ret);
1601		goto out;
1602	}
1603
1604	ocfs2_journal_dirty(handle, ref_leaf_bh);
1605	ocfs2_journal_dirty(handle, new_bh);
1606
1607	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1608
1609	trace_ocfs2_new_leaf_refcount_block(
1610			(unsigned long long)new_bh->b_blocknr, new_cpos);
1611
1612	/* Insert the new leaf block with the specific offset cpos. */
1613	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1614				  1, 0, meta_ac);
1615	if (ret)
1616		mlog_errno(ret);
1617
1618out:
1619	brelse(new_bh);
1620	return ret;
1621}
1622
1623static int ocfs2_expand_refcount_tree(handle_t *handle,
1624				      struct ocfs2_caching_info *ci,
1625				      struct buffer_head *ref_root_bh,
1626				      struct buffer_head *ref_leaf_bh,
1627				      struct ocfs2_alloc_context *meta_ac)
1628{
1629	int ret;
1630	struct buffer_head *expand_bh = NULL;
1631
1632	if (ref_root_bh == ref_leaf_bh) {
1633		/*
1634		 * the old root bh hasn't been expanded to a b-tree,
1635		 * so expand it first.
1636		 */
1637		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
1638						   &expand_bh, meta_ac);
1639		if (ret) {
1640			mlog_errno(ret);
1641			goto out;
1642		}
1643	} else {
1644		expand_bh = ref_leaf_bh;
1645		get_bh(expand_bh);
1646	}
1647
1649	/* Now add a new refcount block into the tree. */
1650	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
1651					    expand_bh, meta_ac);
1652	if (ret)
1653		mlog_errno(ret);
1654out:
1655	brelse(expand_bh);
1656	return ret;
1657}
1658
1659/*
1660 * Adjust the extent rec in b-tree representing ref_leaf_bh.
1661 *
1662 * Only called when we have inserted a new refcount rec at index 0
1663 * which means ocfs2_extent_rec.e_cpos may need some change.
1664 */
1665static int ocfs2_adjust_refcount_rec(handle_t *handle,
1666				     struct ocfs2_caching_info *ci,
1667				     struct buffer_head *ref_root_bh,
1668				     struct buffer_head *ref_leaf_bh,
1669				     struct ocfs2_refcount_rec *rec)
1670{
1671	int ret = 0, i;
1672	u32 new_cpos, old_cpos;
1673	struct ocfs2_path *path = NULL;
1674	struct ocfs2_extent_tree et;
1675	struct ocfs2_refcount_block *rb =
1676		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
1677	struct ocfs2_extent_list *el;
1678
1679	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
1680		goto out;
1681
1682	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1683	old_cpos = le32_to_cpu(rb->rf_cpos);
1684	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
1685	if (old_cpos <= new_cpos)
1686		goto out;
1687
1688	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1689
1690	path = ocfs2_new_path_from_et(&et);
1691	if (!path) {
1692		ret = -ENOMEM;
1693		mlog_errno(ret);
1694		goto out;
1695	}
1696
1697	ret = ocfs2_find_path(ci, path, old_cpos);
1698	if (ret) {
1699		mlog_errno(ret);
1700		goto out;
1701	}
1702
1703	/*
1704	 * 2 more credits, one for the leaf refcount block, one for
1705	 * the extent block that contains the extent rec.
1706	 */
1707	ret = ocfs2_extend_trans(handle, 2);
1708	if (ret < 0) {
1709		mlog_errno(ret);
1710		goto out;
1711	}
1712
1713	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1714				      OCFS2_JOURNAL_ACCESS_WRITE);
1715	if (ret < 0) {
1716		mlog_errno(ret);
1717		goto out;
1718	}
1719
1720	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
1721				      OCFS2_JOURNAL_ACCESS_WRITE);
1722	if (ret < 0) {
1723		mlog_errno(ret);
1724		goto out;
1725	}
1726
1727	/* change the leaf extent block first. */
1728	el = path_leaf_el(path);
1729
1730	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
1731		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
1732			break;
1733
1734	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
1735
1736	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
1737
1738	/* change the r_cpos in the leaf block. */
1739	rb->rf_cpos = cpu_to_le32(new_cpos);
1740
1741	ocfs2_journal_dirty(handle, path_leaf_bh(path));
1742	ocfs2_journal_dirty(handle, ref_leaf_bh);
1743
1744out:
1745	ocfs2_free_path(path);
1746	return ret;
1747}
1748
1749static int ocfs2_insert_refcount_rec(handle_t *handle,
1750				     struct ocfs2_caching_info *ci,
1751				     struct buffer_head *ref_root_bh,
1752				     struct buffer_head *ref_leaf_bh,
1753				     struct ocfs2_refcount_rec *rec,
1754				     int index, int merge,
1755				     struct ocfs2_alloc_context *meta_ac)
1756{
1757	int ret;
1758	struct ocfs2_refcount_block *rb =
1759			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1760	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1761	struct buffer_head *new_bh = NULL;
1762
1763	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1764
1765	if (rf_list->rl_used == rf_list->rl_count) {
1766		u64 cpos = le64_to_cpu(rec->r_cpos);
1767		u32 len = le32_to_cpu(rec->r_clusters);
1768
1769		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1770						 ref_leaf_bh, meta_ac);
1771		if (ret) {
1772			mlog_errno(ret);
1773			goto out;
1774		}
1775
1776		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1777					     cpos, len, NULL, &index,
1778					     &new_bh);
1779		if (ret) {
1780			mlog_errno(ret);
1781			goto out;
1782		}
1783
1784		ref_leaf_bh = new_bh;
1785		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1786		rf_list = &rb->rf_records;
1787	}
1788
1789	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1790				      OCFS2_JOURNAL_ACCESS_WRITE);
1791	if (ret) {
1792		mlog_errno(ret);
1793		goto out;
1794	}
1795
1796	if (index < le16_to_cpu(rf_list->rl_used))
1797		memmove(&rf_list->rl_recs[index + 1],
1798			&rf_list->rl_recs[index],
1799			(le16_to_cpu(rf_list->rl_used) - index) *
1800			 sizeof(struct ocfs2_refcount_rec));
1801
1802	trace_ocfs2_insert_refcount_rec(
1803		(unsigned long long)ref_leaf_bh->b_blocknr, index,
1804		(unsigned long long)le64_to_cpu(rec->r_cpos),
1805		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
1806
1807	rf_list->rl_recs[index] = *rec;
1808
1809	le16_add_cpu(&rf_list->rl_used, 1);
1810
1811	if (merge)
1812		ocfs2_refcount_rec_merge(rb, index);
1813
1814	ocfs2_journal_dirty(handle, ref_leaf_bh);
1815
1816	if (index == 0) {
1817		ret = ocfs2_adjust_refcount_rec(handle, ci,
1818						ref_root_bh,
1819						ref_leaf_bh, rec);
1820		if (ret)
1821			mlog_errno(ret);
1822	}
1823out:
1824	brelse(new_bh);
1825	return ret;
1826}
1827
1828/*
1829 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1830 * This is much simpler than our b-tree code.
1831 * split_rec is the new refcount rec we want to insert.
1832 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1833 * increase a refcount or decrease a refcount to non-zero).
1834 * If split_rec->r_refcount == 0, we are punching a hole in the current
1835 * refcount rec (in case we decrease a refcount to zero).
1836 */
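/*
 * A worked example with invented numbers: if orig_rec is (cpos 0,
 * clusters 10, refcount 2) and split_rec is (cpos 3, clusters 4,
 * refcount 3), neither edge lines up, so recs_need = 2 and the result
 * is three records: (0,3,2), (3,4,3) and (7,3,2).
 */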
1837static int ocfs2_split_refcount_rec(handle_t *handle,
1838				    struct ocfs2_caching_info *ci,
1839				    struct buffer_head *ref_root_bh,
1840				    struct buffer_head *ref_leaf_bh,
1841				    struct ocfs2_refcount_rec *split_rec,
1842				    int index, int merge,
1843				    struct ocfs2_alloc_context *meta_ac,
1844				    struct ocfs2_cached_dealloc_ctxt *dealloc)
1845{
1846	int ret, recs_need;
1847	u32 len;
1848	struct ocfs2_refcount_block *rb =
1849			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1850	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1851	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1852	struct ocfs2_refcount_rec *tail_rec = NULL;
1853	struct buffer_head *new_bh = NULL;
1854
1855	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1856
1857	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
1858		le32_to_cpu(orig_rec->r_clusters),
1859		le32_to_cpu(orig_rec->r_refcount),
1860		le64_to_cpu(split_rec->r_cpos),
1861		le32_to_cpu(split_rec->r_clusters),
1862		le32_to_cpu(split_rec->r_refcount));
1863
1864	/*
1865	 * If we just need to split the header or tail clusters,
1866	 * no more recs are needed; the split alone is OK.
1867	 * Otherwise we need at least one new rec.
1868	 */
1869	if (!split_rec->r_refcount &&
1870	    (split_rec->r_cpos == orig_rec->r_cpos ||
1871	     le64_to_cpu(split_rec->r_cpos) +
1872	     le32_to_cpu(split_rec->r_clusters) ==
1873	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1874		recs_need = 0;
1875	else
1876		recs_need = 1;
1877
1878	/*
1879	 * We need one more rec if we split in the middle and the new rec has
1880	 * some refcount in it.
1881	 */
1882	if (split_rec->r_refcount &&
1883	    (split_rec->r_cpos != orig_rec->r_cpos &&
1884	     le64_to_cpu(split_rec->r_cpos) +
1885	     le32_to_cpu(split_rec->r_clusters) !=
1886	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1887		recs_need++;
1888
1889	/* If the leaf block doesn't have enough records, expand it. */
1890	if (le16_to_cpu(rf_list->rl_used) + recs_need >
1891					 le16_to_cpu(rf_list->rl_count)) {
1892		struct ocfs2_refcount_rec tmp_rec;
1893		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1894		len = le32_to_cpu(orig_rec->r_clusters);
1895		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1896						 ref_leaf_bh, meta_ac);
1897		if (ret) {
1898			mlog_errno(ret);
1899			goto out;
1900		}
1901
1902		/*
1903		 * We have to re-get it since now cpos may be moved to
1904		 * another leaf block.
1905		 */
1906		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1907					     cpos, len, &tmp_rec, &index,
1908					     &new_bh);
1909		if (ret) {
1910			mlog_errno(ret);
1911			goto out;
1912		}
1913
1914		ref_leaf_bh = new_bh;
1915		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1916		rf_list = &rb->rf_records;
1917		orig_rec = &rf_list->rl_recs[index];
1918	}
1919
1920	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1921				      OCFS2_JOURNAL_ACCESS_WRITE);
1922	if (ret) {
1923		mlog_errno(ret);
1924		goto out;
1925	}
1926
1927	/*
1928	 * We have calculated how many new records we need and stored that
1929	 * in recs_need, so make enough space first by moving the records
1930	 * after "index" to the end.
1931	 */
1932	if (index != le16_to_cpu(rf_list->rl_used) - 1)
1933		memmove(&rf_list->rl_recs[index + 1 + recs_need],
1934			&rf_list->rl_recs[index + 1],
1935			(le16_to_cpu(rf_list->rl_used) - index - 1) *
1936			 sizeof(struct ocfs2_refcount_rec));
1937
1938	len = (le64_to_cpu(orig_rec->r_cpos) +
1939	      le32_to_cpu(orig_rec->r_clusters)) -
1940	      (le64_to_cpu(split_rec->r_cpos) +
1941	      le32_to_cpu(split_rec->r_clusters));
1942
1943	/*
1944	 * If we have "len", then we will split off the tail and move it
1945	 * to the end of the space we have just spared.
1946	 */
1947	if (len) {
1948		tail_rec = &rf_list->rl_recs[index + recs_need];
1949
1950		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1951		le64_add_cpu(&tail_rec->r_cpos,
1952			     le32_to_cpu(tail_rec->r_clusters) - len);
1953		tail_rec->r_clusters = cpu_to_le32(len);
1954	}
1955
1956	/*
1957	 * If the split pos isn't the same as the original one, we need to
1958	 * split in the head.
1959	 *
1960	 * Note: It can happen that split_rec.r_refcount = 0,
1961	 * recs_need = 0 and len > 0, which means we just cut the head off
1962	 * the orig_rec. In that case we have already modified orig_rec
1963	 * above, so the r_cpos check would be fooled.
1964	 */
1965	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1966		len = le64_to_cpu(split_rec->r_cpos) -
1967		      le64_to_cpu(orig_rec->r_cpos);
1968		orig_rec->r_clusters = cpu_to_le32(len);
1969		index++;
1970	}
1971
1972	le16_add_cpu(&rf_list->rl_used, recs_need);
1973
1974	if (split_rec->r_refcount) {
1975		rf_list->rl_recs[index] = *split_rec;
1976		trace_ocfs2_split_refcount_rec_insert(
1977			(unsigned long long)ref_leaf_bh->b_blocknr, index,
1978			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
1979			le32_to_cpu(split_rec->r_clusters),
1980			le32_to_cpu(split_rec->r_refcount));
1981
1982		if (merge)
1983			ocfs2_refcount_rec_merge(rb, index);
1984	}
1985
1986	ocfs2_journal_dirty(handle, ref_leaf_bh);
1987
1988out:
1989	brelse(new_bh);
1990	return ret;
1991}
1992
1993static int __ocfs2_increase_refcount(handle_t *handle,
1994				     struct ocfs2_caching_info *ci,
1995				     struct buffer_head *ref_root_bh,
1996				     u64 cpos, u32 len, int merge,
1997				     struct ocfs2_alloc_context *meta_ac,
1998				     struct ocfs2_cached_dealloc_ctxt *dealloc)
1999{
2000	int ret = 0, index;
2001	struct buffer_head *ref_leaf_bh = NULL;
2002	struct ocfs2_refcount_rec rec;
2003	unsigned int set_len = 0;
2004
2005	trace_ocfs2_increase_refcount_begin(
2006	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
2007	     (unsigned long long)cpos, len);
2008
2009	while (len) {
2010		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2011					     cpos, len, &rec, &index,
2012					     &ref_leaf_bh);
2013		if (ret) {
2014			mlog_errno(ret);
2015			goto out;
2016		}
2017
2018		set_len = le32_to_cpu(rec.r_clusters);
2019
2020		/*
2021		 * Here we may encounter 3 situations:
2022		 *
2023		 * 1. If we find an already existing record, and the length
2024		 *    is the same, cool, we just need to increase the r_refcount
2025		 *    and it is OK.
2026		 * 2. If we find a hole, just insert it with r_refcount = 1.
2027		 * 3. If we are in the middle of one extent record, split
2028		 *    it.
2029		 */
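		/*
		 * For example (hypothetical numbers): increasing over
		 * cpos 0, len 20 against a record (cpos 0, clusters 10,
		 * refcount 1) hits case 1 for the first 10 clusters, then
		 * loops to handle the remaining 10 against the next
		 * record or hole.
		 */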
2030		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2031		    set_len <= len) {
2032			trace_ocfs2_increase_refcount_change(
2033				(unsigned long long)cpos, set_len,
2034				le32_to_cpu(rec.r_refcount));
2035			ret = ocfs2_change_refcount_rec(handle, ci,
2036							ref_leaf_bh, index,
2037							merge, 1);
2038			if (ret) {
2039				mlog_errno(ret);
2040				goto out;
2041			}
2042		} else if (!rec.r_refcount) {
2043			rec.r_refcount = cpu_to_le32(1);
2044
2045			trace_ocfs2_increase_refcount_insert(
2046			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2047			     set_len);
2048			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
2049							ref_leaf_bh,
2050							&rec, index,
2051							merge, meta_ac);
2052			if (ret) {
2053				mlog_errno(ret);
2054				goto out;
2055			}
2056		} else {
2057			set_len = min((u64)(cpos + len),
2058				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
2059			rec.r_cpos = cpu_to_le64(cpos);
2060			rec.r_clusters = cpu_to_le32(set_len);
2061			le32_add_cpu(&rec.r_refcount, 1);
2062
2063			trace_ocfs2_increase_refcount_split(
2064			     (unsigned long long)le64_to_cpu(rec.r_cpos),
2065			     set_len, le32_to_cpu(rec.r_refcount));
2066			ret = ocfs2_split_refcount_rec(handle, ci,
2067						       ref_root_bh, ref_leaf_bh,
2068						       &rec, index, merge,
2069						       meta_ac, dealloc);
2070			if (ret) {
2071				mlog_errno(ret);
2072				goto out;
2073			}
2074		}
2075
2076		cpos += set_len;
2077		len -= set_len;
2078		brelse(ref_leaf_bh);
2079		ref_leaf_bh = NULL;
2080	}
2081
2082out:
2083	brelse(ref_leaf_bh);
2084	return ret;
2085}
2086
2087static int ocfs2_remove_refcount_extent(handle_t *handle,
2088				struct ocfs2_caching_info *ci,
2089				struct buffer_head *ref_root_bh,
2090				struct buffer_head *ref_leaf_bh,
2091				struct ocfs2_alloc_context *meta_ac,
2092				struct ocfs2_cached_dealloc_ctxt *dealloc)
2093{
2094	int ret;
2095	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2096	struct ocfs2_refcount_block *rb =
2097			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2098	struct ocfs2_extent_tree et;
2099
2100	BUG_ON(rb->rf_records.rl_used);
2101
2102	trace_ocfs2_remove_refcount_extent(
2103		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2104		(unsigned long long)ref_leaf_bh->b_blocknr,
2105		le32_to_cpu(rb->rf_cpos));
2106
2107	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2108	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
2109				  1, meta_ac, dealloc);
2110	if (ret) {
2111		mlog_errno(ret);
2112		goto out;
2113	}
2114
2115	ocfs2_remove_from_cache(ci, ref_leaf_bh);
2116
2117	/*
2118	 * add the freed block to the dealloc so that it will be freed
2119	 * when we run dealloc.
2120	 */
2121	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
2122					le16_to_cpu(rb->rf_suballoc_slot),
2123					le64_to_cpu(rb->rf_suballoc_loc),
2124					le64_to_cpu(rb->rf_blkno),
2125					le16_to_cpu(rb->rf_suballoc_bit));
2126	if (ret) {
2127		mlog_errno(ret);
2128		goto out;
2129	}
2130
2131	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
2132				      OCFS2_JOURNAL_ACCESS_WRITE);
2133	if (ret) {
2134		mlog_errno(ret);
2135		goto out;
2136	}
2137
2138	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2139
2140	le32_add_cpu(&rb->rf_clusters, -1);
2141
2142	/*
2143	 * Check whether we need to restore the root refcount block if
2144	 * there is no leaf extent block at all.
2145	 */
2146	if (!rb->rf_list.l_next_free_rec) {
2147		BUG_ON(rb->rf_clusters);
2148
2149		trace_ocfs2_restore_refcount_block(
2150		     (unsigned long long)ref_root_bh->b_blocknr);
2151
2152		rb->rf_flags = 0;
2153		rb->rf_parent = 0;
2154		rb->rf_cpos = 0;
2155		memset(&rb->rf_records, 0, sb->s_blocksize -
2156		       offsetof(struct ocfs2_refcount_block, rf_records));
2157		rb->rf_records.rl_count =
2158				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2159	}
2160
2161	ocfs2_journal_dirty(handle, ref_root_bh);
2162
2163out:
2164	return ret;
2165}
2166
2167int ocfs2_increase_refcount(handle_t *handle,
2168			    struct ocfs2_caching_info *ci,
2169			    struct buffer_head *ref_root_bh,
2170			    u64 cpos, u32 len,
2171			    struct ocfs2_alloc_context *meta_ac,
2172			    struct ocfs2_cached_dealloc_ctxt *dealloc)
2173{
2174	return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
2175					 cpos, len, 1,
2176					 meta_ac, dealloc);
2177}
2178
2179static int ocfs2_decrease_refcount_rec(handle_t *handle,
2180				struct ocfs2_caching_info *ci,
2181				struct buffer_head *ref_root_bh,
2182				struct buffer_head *ref_leaf_bh,
2183				int index, u64 cpos, unsigned int len,
2184				struct ocfs2_alloc_context *meta_ac,
2185				struct ocfs2_cached_dealloc_ctxt *dealloc)
2186{
2187	int ret;
2188	struct ocfs2_refcount_block *rb =
2189			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2190	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2191
2192	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2193	BUG_ON(cpos + len >
2194	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2195
2196	trace_ocfs2_decrease_refcount_rec(
2197		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2198		(unsigned long long)cpos, len);
2199
2200	if (cpos == le64_to_cpu(rec->r_cpos) &&
2201	    len == le32_to_cpu(rec->r_clusters))
2202		ret = ocfs2_change_refcount_rec(handle, ci,
2203						ref_leaf_bh, index, 1, -1);
2204	else {
2205		struct ocfs2_refcount_rec split = *rec;
2206		split.r_cpos = cpu_to_le64(cpos);
2207		split.r_clusters = cpu_to_le32(len);
2208
2209		le32_add_cpu(&split.r_refcount, -1);
2210
2211		ret = ocfs2_split_refcount_rec(handle, ci,
2212					       ref_root_bh, ref_leaf_bh,
2213					       &split, index, 1,
2214					       meta_ac, dealloc);
2215	}
2216
2217	if (ret) {
2218		mlog_errno(ret);
2219		goto out;
2220	}
2221
2222	/* Remove the leaf refcount block if it contains no refcount record. */
2223	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2224		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2225						   ref_leaf_bh, meta_ac,
2226						   dealloc);
2227		if (ret)
2228			mlog_errno(ret);
2229	}
2230
2231out:
2232	return ret;
2233}
2234
2235static int __ocfs2_decrease_refcount(handle_t *handle,
2236				     struct ocfs2_caching_info *ci,
2237				     struct buffer_head *ref_root_bh,
2238				     u64 cpos, u32 len,
2239				     struct ocfs2_alloc_context *meta_ac,
2240				     struct ocfs2_cached_dealloc_ctxt *dealloc,
2241				     int delete)
2242{
2243	int ret = 0, index = 0;
2244	struct ocfs2_refcount_rec rec;
2245	unsigned int r_count = 0, r_len;
2246	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2247	struct buffer_head *ref_leaf_bh = NULL;
2248
2249	trace_ocfs2_decrease_refcount(
2250		(unsigned long long)ocfs2_metadata_cache_owner(ci),
2251		(unsigned long long)cpos, len, delete);
2252
2253	while (len) {
2254		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2255					     cpos, len, &rec, &index,
2256					     &ref_leaf_bh);
2257		if (ret) {
2258			mlog_errno(ret);
2259			goto out;
2260		}
2261
2262		r_count = le32_to_cpu(rec.r_refcount);
2263		BUG_ON(r_count == 0);
2264		if (!delete)
2265			BUG_ON(r_count > 1);
2266
2267		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2268			      le32_to_cpu(rec.r_clusters)) - cpos;
2269
2270		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2271						  ref_leaf_bh, index,
2272						  cpos, r_len,
2273						  meta_ac, dealloc);
2274		if (ret) {
2275			mlog_errno(ret);
2276			goto out;
2277		}
2278
2279		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
2280			ret = ocfs2_cache_cluster_dealloc(dealloc,
2281					  ocfs2_clusters_to_blocks(sb, cpos),
2282							  r_len);
2283			if (ret) {
2284				mlog_errno(ret);
2285				goto out;
2286			}
2287		}
2288
2289		cpos += r_len;
2290		len -= r_len;
2291		brelse(ref_leaf_bh);
2292		ref_leaf_bh = NULL;
2293	}
2294
2295out:
2296	brelse(ref_leaf_bh);
2297	return ret;
2298}
2299
2300/* Caller must hold refcount tree lock. */
2301int ocfs2_decrease_refcount(struct inode *inode,
2302			    handle_t *handle, u32 cpos, u32 len,
2303			    struct ocfs2_alloc_context *meta_ac,
2304			    struct ocfs2_cached_dealloc_ctxt *dealloc,
2305			    int delete)
2306{
2307	int ret;
2308	u64 ref_blkno;
2309	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2310	struct buffer_head *ref_root_bh = NULL;
2311	struct ocfs2_refcount_tree *tree;
2312
2313	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2314
2315	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2316	if (ret) {
2317		mlog_errno(ret);
2318		goto out;
2319	}
2320
2321	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2322	if (ret) {
2323		mlog_errno(ret);
2324		goto out;
2325	}
2326
2327	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2328					&ref_root_bh);
2329	if (ret) {
2330		mlog_errno(ret);
2331		goto out;
2332	}
2333
2334	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2335					cpos, len, meta_ac, dealloc, delete);
2336	if (ret)
2337		mlog_errno(ret);
2338out:
2339	brelse(ref_root_bh);
2340	return ret;
2341}
2342
2343/*
2344 * Mark the already-existing extent at cpos as refcounted for len clusters.
2345 * This adds the refcount extent flag.
2346 *
2347 * If the existing extent is larger than the request, initiate a
2348 * split. An attempt will be made at merging with adjacent extents.
2349 *
2350 * The caller is responsible for passing down meta_ac if we'll need it.
2351 */
2352static int ocfs2_mark_extent_refcounted(struct inode *inode,
2353				struct ocfs2_extent_tree *et,
2354				handle_t *handle, u32 cpos,
2355				u32 len, u32 phys,
2356				struct ocfs2_alloc_context *meta_ac,
2357				struct ocfs2_cached_dealloc_ctxt *dealloc)
2358{
2359	int ret;
2360
2361	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
2362					   cpos, len, phys);
2363
2364	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2365		ocfs2_error(inode->i_sb,
2366			    "Inode %lu wants to use refcount tree, but the feature bit is not set in the super block\n",
2367			    inode->i_ino);
2368		ret = -EROFS;
2369		goto out;
2370	}
2371
2372	ret = ocfs2_change_extent_flag(handle, et, cpos,
2373				       len, phys, meta_ac, dealloc,
2374				       OCFS2_EXT_REFCOUNTED, 0);
2375	if (ret)
2376		mlog_errno(ret);
2377
2378out:
2379	return ret;
2380}
2381
2382/*
2383 * Given some contiguous physical clusters, calculate what we need
2384 * for modifying their refcount.
2385 */
2386static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2387					    struct ocfs2_caching_info *ci,
2388					    struct buffer_head *ref_root_bh,
2389					    u64 start_cpos,
2390					    u32 clusters,
2391					    int *meta_add,
2392					    int *credits)
2393{
2394	int ret = 0, index, ref_blocks = 0, recs_add = 0;
2395	u64 cpos = start_cpos;
2396	struct ocfs2_refcount_block *rb;
2397	struct ocfs2_refcount_rec rec;
2398	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2399	u32 len;
2400
2401	while (clusters) {
2402		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2403					     cpos, clusters, &rec,
2404					     &index, &ref_leaf_bh);
2405		if (ret) {
2406			mlog_errno(ret);
2407			goto out;
2408		}
2409
2410		if (ref_leaf_bh != prev_bh) {
2411			/*
2412			 * Now we encounter a new leaf block, so calculate
2413			 * whether we need to extend the old leaf.
2414			 */
2415			if (prev_bh) {
2416				rb = (struct ocfs2_refcount_block *)
2417							prev_bh->b_data;
2418
2419				if (le16_to_cpu(rb->rf_records.rl_used) +
2420				    recs_add >
2421				    le16_to_cpu(rb->rf_records.rl_count))
2422					ref_blocks++;
2423			}
2424
2425			recs_add = 0;
2426			*credits += 1;
2427			brelse(prev_bh);
2428			prev_bh = ref_leaf_bh;
2429			get_bh(prev_bh);
2430		}
2431
2432		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2433
2434		trace_ocfs2_calc_refcount_meta_credits_iterate(
2435				recs_add, (unsigned long long)cpos, clusters,
2436				(unsigned long long)le64_to_cpu(rec.r_cpos),
2437				le32_to_cpu(rec.r_clusters),
2438				le32_to_cpu(rec.r_refcount), index);
2439
2440		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2441			  le32_to_cpu(rec.r_clusters)) - cpos;
2442		/*
2443		 * We count all the records which will be inserted into the
2444		 * same refcount block, so that we can tell exactly whether
2445		 * we need a new refcount block or not.
2446		 *
2447		 * If we insert a new one, this is easy and only happens
2448		 * while adding the refcounted flag to the extent, so we don't
2449		 * have a chance of splitting. We just need one record.
2450		 *
2451		 * If the refcount rec already exists, things are a little
2452		 * more complicated. We may have to:
2453		 * 1) split at the beginning if the start pos isn't aligned;
2454		 *    we need 1 more record in this case.
2455		 * 2) split at the end if the end pos isn't aligned;
2456		 *    we need 1 more record in this case.
2457		 * 3) split in the middle because of file system fragmentation;
2458		 *    we need 2 more records in this case (we can't detect this
2459		 *    beforehand, so always assume the worst case).
2460		 */
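		/*
		 * A made-up example: decreasing the middle of an existing
		 * rec (cpos 0, clusters 10) over cpos 3, len 4 leaves
		 * (0,3) and (7,3) around the new (3,4) rec, i.e. 2 extra
		 * records beyond the one being changed.
		 */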
2461		if (rec.r_refcount) {
2462			recs_add += 2;
2463			/* Check whether we need a split at the beginning. */
2464			if (cpos == start_cpos &&
2465			    cpos != le64_to_cpu(rec.r_cpos))
2466				recs_add++;
2467
2468			/* Check whether we need a split in the end. */
2469			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2470			    le32_to_cpu(rec.r_clusters))
2471				recs_add++;
2472		} else
2473			recs_add++;
2474
2475		brelse(ref_leaf_bh);
2476		ref_leaf_bh = NULL;
2477		clusters -= len;
2478		cpos += len;
2479	}
2480
2481	if (prev_bh) {
2482		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2483
2484		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2485		    le16_to_cpu(rb->rf_records.rl_count))
2486			ref_blocks++;
2487
2488		*credits += 1;
2489	}
2490
2491	if (!ref_blocks)
2492		goto out;
2493
2494	*meta_add += ref_blocks;
2495	*credits += ref_blocks;
2496
2497	/*
2498	 * So we may need ref_blocks to insert into the tree.
2499	 * That also means we need to change the b-tree and add that number
2500	 * of records since we never merge them.
2501	 * We need one more block for expansion since the newly created leaf
2502	 * block is also full and needs a split.
2503	 */
2504	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2505	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2506		struct ocfs2_extent_tree et;
2507
2508		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2509		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2510		*credits += ocfs2_calc_extend_credits(sb,
2511						      et.et_root_el);
2512	} else {
2513		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2514		*meta_add += 1;
2515	}
2516
2517out:
2518
2519	trace_ocfs2_calc_refcount_meta_credits(
2520		(unsigned long long)start_cpos, clusters,
2521		*meta_add, *credits);
2522	brelse(ref_leaf_bh);
2523	brelse(prev_bh);
2524	return ret;
2525}
2526
2527/*
2528 * For a refcount tree, we will decrease the refcount of some
2529 * contiguous clusters, so just walk through them to see how many
2530 * blocks we are going to touch and whether we need to create new blocks.
2531 *
2532 * Normally the refcount blocks that store these refcounts should be
2533 * contiguous as well, so we can get the number easily.
2534 * We will at most split 2 refcount records and add 2 more
2535 * refcount blocks, so the check is only a rough estimate.
2536 *
2537 * Caller must hold refcount tree lock.
2538 */
2539int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2540					  u64 refcount_loc,
2541					  u64 phys_blkno,
2542					  u32 clusters,
2543					  int *credits,
2544					  int *ref_blocks)
2545{
2546	int ret;
2547	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2548	struct buffer_head *ref_root_bh = NULL;
2549	struct ocfs2_refcount_tree *tree;
2550	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2551
2552	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2553		ocfs2_error(inode->i_sb,
2554			    "Inode %lu wants to use refcount tree, but the feature bit is not set in the super block\n",
2555			    inode->i_ino);
2556		ret = -EROFS;
2557		goto out;
2558	}
2559
2560	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2561
2562	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2563				      refcount_loc, &tree);
2564	if (ret) {
2565		mlog_errno(ret);
2566		goto out;
2567	}
2568
2569	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
2570					&ref_root_bh);
2571	if (ret) {
2572		mlog_errno(ret);
2573		goto out;
2574	}
2575
2576	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2577					       &tree->rf_ci,
2578					       ref_root_bh,
2579					       start_cpos, clusters,
2580					       ref_blocks, credits);
2581	if (ret) {
2582		mlog_errno(ret);
2583		goto out;
2584	}
2585
2586	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
2587
2588out:
2589	brelse(ref_root_bh);
2590	return ret;
2591}
2592
2593#define	MAX_CONTIG_BYTES	1048576
2594
2595static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2596{
2597	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2598}
2599
2600static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2601{
2602	return ~(ocfs2_cow_contig_clusters(sb) - 1);
2603}
2604
2605/*
2606 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2607 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2608 * while still being less than or equal to it.
2609 *
2610 * The goal is to break the extent at a multiple of contig_clusters.
2611 */
2612static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2613						 unsigned int start,
2614						 unsigned int cpos)
2615{
2616	BUG_ON(start > cpos);
2617
2618	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
2619}
2620
2621/*
2622 * Given a cluster count of len, pad it out so that it is a multiple
2623 * of contig_clusters.
2624 */
2625static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2626						  unsigned int len)
2627{
2628	unsigned int padded =
2629		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2630		ocfs2_cow_contig_mask(sb);
2631
2632	/* Did we wrap? */
2633	if (padded < len)
2634		padded = UINT_MAX;
2635
2636	return padded;
2637}
2638
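/*
 * A hedged example (assuming 4K clusters, so contig_clusters = 256 and
 * the contig mask is ~255): ocfs2_cow_align_start(sb, 100, 1000) returns
 * 100 + (900 & ~255) = 868, and ocfs2_cow_align_length(sb, 300) pads 300
 * up to 512.
 */
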
2639/*
2640 * Calculate the start and number of virtual clusters we need to CoW.
2641 *
2642 * cpos is the virtual start cluster position at which we want to do CoW
2643 * in a file and write_len is the cluster length.
2644 * max_cpos is the place where we want to stop CoW intentionally.
2645 *
2646 * Normally we will start CoW from the beginning of the extent record containing cpos.
2647 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
2648 * get good I/O from the resulting extent tree.
2649 */
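/*
 * A worked example under assumed values (4K clusters, so contig_clusters
 * = 256): for a single refcounted extent covering virtual clusters
 * [0, 1024) and a write at cpos 300 for 10 clusters, the region is
 * sliced on contig_clusters boundaries, giving *cow_start = 256 and
 * *cow_len = 256.
 */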
2650static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2651					   struct ocfs2_extent_list *el,
2652					   u32 cpos,
2653					   u32 write_len,
2654					   u32 max_cpos,
2655					   u32 *cow_start,
2656					   u32 *cow_len)
2657{
2658	int ret = 0;
2659	int tree_height = le16_to_cpu(el->l_tree_depth), i;
2660	struct buffer_head *eb_bh = NULL;
2661	struct ocfs2_extent_block *eb = NULL;
2662	struct ocfs2_extent_rec *rec;
2663	unsigned int want_clusters, rec_end = 0;
2664	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2665	int leaf_clusters;
2666
2667	BUG_ON(cpos + write_len > max_cpos);
2668
2669	if (tree_height > 0) {
2670		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2671		if (ret) {
2672			mlog_errno(ret);
2673			goto out;
2674		}
2675
2676		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2677		el = &eb->h_list;
2678
2679		if (el->l_tree_depth) {
2680			ocfs2_error(inode->i_sb,
2681				    "Inode %lu has non zero tree depth in leaf block %llu\n",
2682				    inode->i_ino,
2683				    (unsigned long long)eb_bh->b_blocknr);
2684			ret = -EROFS;
2685			goto out;
2686		}
2687	}
2688
2689	*cow_len = 0;
2690	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2691		rec = &el->l_recs[i];
2692
2693		if (ocfs2_is_empty_extent(rec)) {
2694			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in index %d\n",
2695					inode->i_ino, i);
2696			continue;
2697		}
2698
2699		if (le32_to_cpu(rec->e_cpos) +
2700		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2701			continue;
2702
2703		if (*cow_len == 0) {
2704			/*
2705			 * We should find a refcounted record in the
2706			 * first pass.
2707			 */
2708			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2709			*cow_start = le32_to_cpu(rec->e_cpos);
2710		}
2711
2712		/*
2713		 * If we encounter a hole, a non-refcounted record or
2714		 * pass the max_cpos, stop the search.
2715		 */
2716		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2717		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
2718		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
2719			break;
2720
2721		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2722		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2723		if (rec_end > max_cpos) {
2724			rec_end = max_cpos;
2725			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
2726		}
2727
2728		/*
2729		 * How many clusters do we actually need from
2730		 * this extent?  First we see how many we actually
2731		 * need to complete the write.  If that's smaller
2732		 * than contig_clusters, we try for contig_clusters.
2733		 */
2734		if (!*cow_len)
2735			want_clusters = write_len;
2736		else
2737			want_clusters = (cpos + write_len) -
2738				(*cow_start + *cow_len);
2739		if (want_clusters < contig_clusters)
2740			want_clusters = contig_clusters;
2741
2742		/*
2743		 * If the write does not cover the whole extent, we
2744		 * need to calculate how we're going to split the extent.
2745		 * We try to do it on contig_clusters boundaries.
2746		 *
2747		 * Any extent smaller than contig_clusters will be
2748		 * CoWed in its entirety.
2749		 */
2750		if (leaf_clusters <= contig_clusters)
2751			*cow_len += leaf_clusters;
2752		else if (*cow_len || (*cow_start == cpos)) {
2753			/*
2754			 * This extent needs to be CoW'd from its
2755			 * beginning, so all we have to do is compute
2756			 * how many clusters to grab.  We align
2757			 * want_clusters to the edge of contig_clusters
2758			 * to get better I/O.
2759			 */
2760			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2761							       want_clusters);
2762
2763			if (leaf_clusters < want_clusters)
2764				*cow_len += leaf_clusters;
2765			else
2766				*cow_len += want_clusters;
2767		} else if ((*cow_start + contig_clusters) >=
2768			   (cpos + write_len)) {
2769			/*
2770			 * Breaking off contig_clusters at the front
2771			 * of the extent will cover our write.  That's
2772			 * easy.
2773			 */
2774			*cow_len = contig_clusters;
2775		} else if ((rec_end - cpos) <= contig_clusters) {
2776			/*
2777			 * Breaking off contig_clusters at the tail of
2778			 * this extent will cover cpos.
2779			 */
2780			*cow_start = rec_end - contig_clusters;
2781			*cow_len = contig_clusters;
2782		} else if ((rec_end - cpos) <= want_clusters) {
2783			/*
2784			 * While we can't fit the entire write in this
2785			 * extent, we know that the write goes from cpos
2786			 * to the end of the extent.  Break that off.
2787			 * We try to break it at some multiple of
2788			 * contig_clusters from the front of the extent.
2789			 * Failing that (i.e., cpos is within
2790			 * contig_clusters of the front), we'll CoW the
2791			 * entire extent.
2792			 */
2793			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2794							   *cow_start, cpos);
2795			*cow_len = rec_end - *cow_start;
2796		} else {
2797			/*
2798			 * Ok, the entire write lives in the middle of
2799			 * this extent.  Let's try to slice the extent up
2800			 * nicely.  Optimally, our CoW region starts at
2801			 * m*contig_clusters from the beginning of the
2802			 * extent and goes for n*contig_clusters,
2803			 * covering the entire write.
2804			 */
2805			*cow_start = ocfs2_cow_align_start(inode->i_sb,
2806							   *cow_start, cpos);
2807
2808			want_clusters = (cpos + write_len) - *cow_start;
2809			want_clusters = ocfs2_cow_align_length(inode->i_sb,
2810							       want_clusters);
2811			if (*cow_start + want_clusters <= rec_end)
2812				*cow_len = want_clusters;
2813			else
2814				*cow_len = rec_end - *cow_start;
2815		}
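		/*
		 * A worked example of the case above (a sketch, assuming
		 * contig_clusters == 32 and align helpers that round to
		 * multiples of contig_clusters measured from the start of
		 * the extent): the extent covers cpos 0-127 and the write
		 * is cpos 70 for 4 clusters.  *cow_start advances from 0
		 * to 64, want_clusters becomes (70 + 4) - 64 = 10 and is
		 * rounded up to 32, so we CoW clusters 64-95, which
		 * covers the write.
		 */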
2816
2817		/* Have we covered our entire write yet? */
2818		if ((*cow_start + *cow_len) >= (cpos + write_len))
2819			break;
2820
2821		/*
2822		 * If we reach the end of the extent block and don't get enough
2823		 * clusters, continue with the next extent block if possible.
2824		 */
2825		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2826		    eb && eb->h_next_leaf_blk) {
2827			brelse(eb_bh);
2828			eb_bh = NULL;
2829
2830			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2831					       le64_to_cpu(eb->h_next_leaf_blk),
2832					       &eb_bh);
2833			if (ret) {
2834				mlog_errno(ret);
2835				goto out;
2836			}
2837
2838			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2839			el = &eb->h_list;
2840			i = -1;
2841		}
2842	}
2843
2844out:
2845	brelse(eb_bh);
2846	return ret;
2847}
2848
2849/*
2850 * Prepare meta_ac and data_ac, and calculate credits, when we want to add
2851 * num_clusters to the data tree "et" and change the refcount for the old
2852 * clusters (starting from p_cluster) in the refcount tree.
2853 *
2854 * Note:
2855 * 1. Since we may split the old tree, we will need at most num_clusters + 2
2856 *    new leaf records.
2857 * 2. In some cases we don't need to reserve new clusters (e.g. reflink), so
2858 *    the caller may just pass data_ac = NULL.
2859 */
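/*
 * As an illustration of the worst case behind note 1: CoWing a range that
 * maps to 4 physical clusters may add one new leaf record per cluster plus
 * a split at each end of the old refcounted range, i.e. 4 + 2 records.
 */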
2860static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2861					u32 p_cluster, u32 num_clusters,
2862					struct ocfs2_extent_tree *et,
2863					struct ocfs2_caching_info *ref_ci,
2864					struct buffer_head *ref_root_bh,
2865					struct ocfs2_alloc_context **meta_ac,
2866					struct ocfs2_alloc_context **data_ac,
2867					int *credits)
2868{
2869	int ret = 0, meta_add = 0;
2870	int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
2871
2872	if (num_free_extents < 0) {
2873		ret = num_free_extents;
2874		mlog_errno(ret);
2875		goto out;
2876	}
2877
2878	if (num_free_extents < num_clusters + 2)
2879		meta_add =
2880			ocfs2_extend_meta_needed(et->et_root_el);
2881
2882	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
2883
2884	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2885					       p_cluster, num_clusters,
2886					       &meta_add, credits);
2887	if (ret) {
2888		mlog_errno(ret);
2889		goto out;
2890	}
2891
2892	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
2893	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2894						meta_ac);
2895	if (ret) {
2896		mlog_errno(ret);
2897		goto out;
2898	}
2899
2900	if (data_ac) {
2901		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2902					     data_ac);
2903		if (ret)
2904			mlog_errno(ret);
2905	}
2906
2907out:
2908	if (ret) {
2909		if (*meta_ac) {
2910			ocfs2_free_alloc_context(*meta_ac);
2911			*meta_ac = NULL;
2912		}
2913	}
2914
2915	return ret;
2916}
2917
2918static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2919{
2920	BUG_ON(buffer_dirty(bh));
2921
2922	clear_buffer_mapped(bh);
2923
2924	return 0;
2925}
2926
2927int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2928				     struct inode *inode,
2929				     u32 cpos, u32 old_cluster,
2930				     u32 new_cluster, u32 new_len)
2931{
2932	int ret = 0, partial;
2933	struct super_block *sb = inode->i_sb;
2934	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2935	struct page *page;
2936	pgoff_t page_index;
2937	unsigned int from, to, readahead_pages;
2938	loff_t offset, end, map_end;
2939	struct address_space *mapping = inode->i_mapping;
2940
2941	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2942					       new_cluster, new_len);
2943
2944	readahead_pages =
2945		(ocfs2_cow_contig_clusters(sb) <<
2946		 OCFS2_SB(sb)->s_clustersize_bits) >> PAGE_CACHE_SHIFT;
2947	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2948	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
2949	/*
2950	 * We only duplicate pages until we reach the page that contains
2951	 * i_size - 1, so trim 'end' to i_size.
2952	 */
2953	if (end > i_size_read(inode))
2954		end = i_size_read(inode);
2955
2956	while (offset < end) {
2957		page_index = offset >> PAGE_CACHE_SHIFT;
2958		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
2959		if (map_end > end)
2960			map_end = end;
2961
2962		/* from, to is the offset within the page. */
2963		from = offset & (PAGE_CACHE_SIZE - 1);
2964		to = PAGE_CACHE_SIZE;
2965		if (map_end & (PAGE_CACHE_SIZE - 1))
2966			to = map_end & (PAGE_CACHE_SIZE - 1);
2967
2968		page = find_or_create_page(mapping, page_index, GFP_NOFS);
2969		if (!page) {
2970			ret = -ENOMEM;
2971			mlog_errno(ret);
2972			break;
2973		}
2974
2975		/*
2976		 * If PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
2977		 * can't have been dirtied before we CoW it out.
2978		 */
2979		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2980			BUG_ON(PageDirty(page));
2981
2982		if (!PageUptodate(page)) {
2983			ret = block_read_full_page(page, ocfs2_get_block);
2984			if (ret) {
2985				mlog_errno(ret);
2986				goto unlock;
2987			}
2988			lock_page(page);
2989		}
2990
2991		if (page_has_buffers(page)) {
2992			ret = walk_page_buffers(handle, page_buffers(page),
2993						from, to, &partial,
2994						ocfs2_clear_cow_buffer);
2995			if (ret) {
2996				mlog_errno(ret);
2997				goto unlock;
2998			}
2999		}
3000
3001		ocfs2_map_and_dirty_page(inode,
3002					 handle, from, to,
3003					 page, 0, &new_block);
3004		mark_page_accessed(page);
3005unlock:
3006		unlock_page(page);
3007		page_cache_release(page);
3008		page = NULL;
3009		offset = map_end;
3010		if (ret)
3011			break;
3012	}
3013
3014	return ret;
3015}
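/*
 * The page-based duplication above copies through the page cache, so the
 * new data follows the inode's normal writeback path.  The jbd variant
 * below instead copies block by block under journal control; it is used
 * for xattr value CoW, which has no pages of its own.
 */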
3016
3017int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
3018				    struct inode *inode,
3019				    u32 cpos, u32 old_cluster,
3020				    u32 new_cluster, u32 new_len)
3021{
3022	int ret = 0;
3023	struct super_block *sb = inode->i_sb;
3024	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3025	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
3026	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
3027	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
3028	struct ocfs2_super *osb = OCFS2_SB(sb);
3029	struct buffer_head *old_bh = NULL;
3030	struct buffer_head *new_bh = NULL;
3031
3032	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3033					       new_cluster, new_len);
3034
3035	for (i = 0; i < blocks; i++, old_block++, new_block++) {
3036		new_bh = sb_getblk(osb->sb, new_block);
3037		if (new_bh == NULL) {
3038			ret = -ENOMEM;
3039			mlog_errno(ret);
3040			break;
3041		}
3042
3043		ocfs2_set_new_buffer_uptodate(ci, new_bh);
3044
3045		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
3046		if (ret) {
3047			mlog_errno(ret);
3048			break;
3049		}
3050
3051		ret = ocfs2_journal_access(handle, ci, new_bh,
3052					   OCFS2_JOURNAL_ACCESS_CREATE);
3053		if (ret) {
3054			mlog_errno(ret);
3055			break;
3056		}
3057
3058		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
3059		ocfs2_journal_dirty(handle, new_bh);
3060
3061		brelse(new_bh);
3062		brelse(old_bh);
3063		new_bh = NULL;
3064		old_bh = NULL;
3065	}
3066
3067	brelse(new_bh);
3068	brelse(old_bh);
3069	return ret;
3070}
3071
3072static int ocfs2_clear_ext_refcount(handle_t *handle,
3073				    struct ocfs2_extent_tree *et,
3074				    u32 cpos, u32 p_cluster, u32 len,
3075				    unsigned int ext_flags,
3076				    struct ocfs2_alloc_context *meta_ac,
3077				    struct ocfs2_cached_dealloc_ctxt *dealloc)
3078{
3079	int ret, index;
3080	struct ocfs2_extent_rec replace_rec;
3081	struct ocfs2_path *path = NULL;
3082	struct ocfs2_extent_list *el;
3083	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
3084	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
3085
3086	trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
3087				       cpos, len, p_cluster, ext_flags);
3088
3089	memset(&replace_rec, 0, sizeof(replace_rec));
3090	replace_rec.e_cpos = cpu_to_le32(cpos);
3091	replace_rec.e_leaf_clusters = cpu_to_le16(len);
3092	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
3093								   p_cluster));
3094	replace_rec.e_flags = ext_flags;
3095	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
3096
3097	path = ocfs2_new_path_from_et(et);
3098	if (!path) {
3099		ret = -ENOMEM;
3100		mlog_errno(ret);
3101		goto out;
3102	}
3103
3104	ret = ocfs2_find_path(et->et_ci, path, cpos);
3105	if (ret) {
3106		mlog_errno(ret);
3107		goto out;
3108	}
3109
3110	el = path_leaf_el(path);
3111
3112	index = ocfs2_search_extent_list(el, cpos);
3113	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
3114		ocfs2_error(sb,
3115			    "Inode %llu has an extent at cpos %u which can no "
3116			    "longer be found.\n",
3117			    (unsigned long long)ino, cpos);
3118		ret = -EROFS;
3119		goto out;
3120	}
3121
3122	ret = ocfs2_split_extent(handle, et, path, index,
3123				 &replace_rec, meta_ac, dealloc);
3124	if (ret)
3125		mlog_errno(ret);
3126
3127out:
3128	ocfs2_free_path(path);
3129	return ret;
3130}
3131
3132static int ocfs2_replace_clusters(handle_t *handle,
3133				  struct ocfs2_cow_context *context,
3134				  u32 cpos, u32 old,
3135				  u32 new, u32 len,
3136				  unsigned int ext_flags)
3137{
3138	int ret;
3139	struct ocfs2_caching_info *ci = context->data_et.et_ci;
3140	u64 ino = ocfs2_metadata_cache_owner(ci);
3141
3142	trace_ocfs2_replace_clusters((unsigned long long)ino,
3143				     cpos, old, new, len, ext_flags);
3144
3145	/* If the old clusters are unwritten, there is no need to duplicate. */
3146	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3147		ret = context->cow_duplicate_clusters(handle, context->inode,
3148						      cpos, old, new, len);
3149		if (ret) {
3150			mlog_errno(ret);
3151			goto out;
3152		}
3153	}
3154
3155	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
3156				       cpos, new, len, ext_flags,
3157				       context->meta_ac, &context->dealloc);
3158	if (ret)
3159		mlog_errno(ret);
3160out:
3161	return ret;
3162}
3163
3164int ocfs2_cow_sync_writeback(struct super_block *sb,
3165			     struct inode *inode,
3166			     u32 cpos, u32 num_clusters)
3167{
3168	int ret = 0;
3169	loff_t offset, end, map_end;
3170	pgoff_t page_index;
3171	struct page *page;
3172
3173	if (ocfs2_should_order_data(inode))
3174		return 0;
3175
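	/*
	 * In ordered mode the journal already forces the data out before
	 * the metadata that references it commits, so the explicit
	 * writeback below is only needed in writeback mode.
	 */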
3176	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3177	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
3178
3179	ret = filemap_fdatawrite_range(inode->i_mapping,
3180				       offset, end - 1);
3181	if (ret < 0) {
3182		mlog_errno(ret);
3183		return ret;
3184	}
3185
3186	while (offset < end) {
3187		page_index = offset >> PAGE_CACHE_SHIFT;
3188		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
3189		if (map_end > end)
3190			map_end = end;
3191
3192		page = find_or_create_page(inode->i_mapping,
3193					   page_index, GFP_NOFS);
3194		BUG_ON(!page);
3195
3196		wait_on_page_writeback(page);
3197		if (PageError(page)) {
3198			ret = -EIO;
3199			mlog_errno(ret);
3200		} else
3201			mark_page_accessed(page);
3202
3203		unlock_page(page);
3204		page_cache_release(page);
3205		page = NULL;
3206		offset = map_end;
3207		if (ret)
3208			break;
3209	}
3210
3211	return ret;
3212}
3213
3214static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
3215				 u32 v_cluster, u32 *p_cluster,
3216				 u32 *num_clusters,
3217				 unsigned int *extent_flags)
3218{
3219	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
3220				  num_clusters, extent_flags);
3221}
3222
3223static int ocfs2_make_clusters_writable(struct super_block *sb,
3224					struct ocfs2_cow_context *context,
3225					u32 cpos, u32 p_cluster,
3226					u32 num_clusters, unsigned int e_flags)
3227{
3228	int ret, delete, index, credits =  0;
3229	u32 new_bit, new_len, orig_num_clusters;
3230	unsigned int set_len;
3231	struct ocfs2_super *osb = OCFS2_SB(sb);
3232	handle_t *handle;
3233	struct buffer_head *ref_leaf_bh = NULL;
3234	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3235	struct ocfs2_refcount_rec rec;
3236
3237	trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3238					   num_clusters, e_flags);
3239
3240	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3241					     &context->data_et,
3242					     ref_ci,
3243					     context->ref_root_bh,
3244					     &context->meta_ac,
3245					     &context->data_ac, &credits);
3246	if (ret) {
3247		mlog_errno(ret);
3248		return ret;
3249	}
3250
3251	if (context->post_refcount)
3252		credits += context->post_refcount->credits;
3253
3254	credits += context->extra_credits;
3255	handle = ocfs2_start_trans(osb, credits);
3256	if (IS_ERR(handle)) {
3257		ret = PTR_ERR(handle);
3258		mlog_errno(ret);
3259		goto out;
3260	}
3261
3262	orig_num_clusters = num_clusters;
3263
3264	while (num_clusters) {
3265		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3266					     p_cluster, num_clusters,
3267					     &rec, &index, &ref_leaf_bh);
3268		if (ret) {
3269			mlog_errno(ret);
3270			goto out_commit;
3271		}
3272
3273		BUG_ON(!rec.r_refcount);
3274		set_len = min((u64)p_cluster + num_clusters,
3275			      le64_to_cpu(rec.r_cpos) +
3276			      le32_to_cpu(rec.r_clusters)) - p_cluster;
3277
3278		/*
3279		 * There are several different situations here:
3280		 * 1. If refcount == 1, remove the flag and don't CoW.
3281		 * 2. If refcount > 1, allocate new clusters.
3282		 *    We may not be able to allocate r_len clusters at once,
3283		 *    so continue until we reach num_clusters.
3284		 */
3285		if (le32_to_cpu(rec.r_refcount) == 1) {
3286			delete = 0;
3287			ret = ocfs2_clear_ext_refcount(handle,
3288						       &context->data_et,
3289						       cpos, p_cluster,
3290						       set_len, e_flags,
3291						       context->meta_ac,
3292						       &context->dealloc);
3293			if (ret) {
3294				mlog_errno(ret);
3295				goto out_commit;
3296			}
3297		} else {
3298			delete = 1;
3299
3300			ret = __ocfs2_claim_clusters(handle,
3301						     context->data_ac,
3302						     1, set_len,
3303						     &new_bit, &new_len);
3304			if (ret) {
3305				mlog_errno(ret);
3306				goto out_commit;
3307			}
3308
3309			ret = ocfs2_replace_clusters(handle, context,
3310						     cpos, p_cluster, new_bit,
3311						     new_len, e_flags);
3312			if (ret) {
3313				mlog_errno(ret);
3314				goto out_commit;
3315			}
3316			set_len = new_len;
3317		}
3318
3319		ret = __ocfs2_decrease_refcount(handle, ref_ci,
3320						context->ref_root_bh,
3321						p_cluster, set_len,
3322						context->meta_ac,
3323						&context->dealloc, delete);
3324		if (ret) {
3325			mlog_errno(ret);
3326			goto out_commit;
3327		}
3328
3329		cpos += set_len;
3330		p_cluster += set_len;
3331		num_clusters -= set_len;
3332		brelse(ref_leaf_bh);
3333		ref_leaf_bh = NULL;
3334	}
3335
3336	/* handle any post_cow action. */
3337	if (context->post_refcount && context->post_refcount->func) {
3338		ret = context->post_refcount->func(context->inode, handle,
3339						context->post_refcount->para);
3340		if (ret) {
3341			mlog_errno(ret);
3342			goto out_commit;
3343		}
3344	}
3345
3346	/*
3347	 * Write the new pages out first if we are
3348	 * in write-back mode.
3349	 */
3350	if (context->get_clusters == ocfs2_di_get_clusters) {
3351		ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3352					       orig_num_clusters);
3353		if (ret)
3354			mlog_errno(ret);
3355	}
3356
3357out_commit:
3358	ocfs2_commit_trans(osb, handle);
3359
3360out:
3361	if (context->data_ac) {
3362		ocfs2_free_alloc_context(context->data_ac);
3363		context->data_ac = NULL;
3364	}
3365	if (context->meta_ac) {
3366		ocfs2_free_alloc_context(context->meta_ac);
3367		context->meta_ac = NULL;
3368	}
3369	brelse(ref_leaf_bh);
3370
3371	return ret;
3372}
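/*
 * To summarize the pass above: we size and start a single transaction, then
 * walk the refcount records covering [p_cluster, p_cluster + num_clusters).
 * A record with refcount == 1 merely has its REFCOUNTED flag cleared;
 * otherwise we claim new clusters, copy the data into them, splice them
 * into the extent tree, and drop the old refcount.
 */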
3373
3374static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3375{
3376	int ret = 0;
3377	struct inode *inode = context->inode;
3378	u32 cow_start = context->cow_start, cow_len = context->cow_len;
3379	u32 p_cluster, num_clusters;
3380	unsigned int ext_flags;
3381	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3382
3383	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
3384		ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount "
3385			    "tree, but the feature bit is not set in the "
3386			    "super block.", inode->i_ino);
3387		return -EROFS;
3388	}
3389
3390	ocfs2_init_dealloc_ctxt(&context->dealloc);
3391
3392	while (cow_len) {
3393		ret = context->get_clusters(context, cow_start, &p_cluster,
3394					    &num_clusters, &ext_flags);
3395		if (ret) {
3396			mlog_errno(ret);
3397			break;
3398		}
3399
3400		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3401
3402		if (cow_len < num_clusters)
3403			num_clusters = cow_len;
3404
3405		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3406						   cow_start, p_cluster,
3407						   num_clusters, ext_flags);
3408		if (ret) {
3409			mlog_errno(ret);
3410			break;
3411		}
3412
3413		cow_len -= num_clusters;
3414		cow_start += num_clusters;
3415	}
3416
3417	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3418		ocfs2_schedule_truncate_log_flush(osb, 1);
3419		ocfs2_run_deallocs(osb, &context->dealloc);
3420	}
3421
3422	return ret;
3423}
3424
3425/*
3426 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
3427 * past max_cpos.  This will stop when it runs into a hole or an
3428 * unrefcounted extent.
3429 */
3430static int ocfs2_refcount_cow_hunk(struct inode *inode,
3431				   struct buffer_head *di_bh,
3432				   u32 cpos, u32 write_len, u32 max_cpos)
3433{
3434	int ret;
3435	u32 cow_start = 0, cow_len = 0;
3436	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3437	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3438	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3439	struct buffer_head *ref_root_bh = NULL;
3440	struct ocfs2_refcount_tree *ref_tree;
3441	struct ocfs2_cow_context *context = NULL;
3442
3443	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
3444
3445	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
3446					      cpos, write_len, max_cpos,
3447					      &cow_start, &cow_len);
3448	if (ret) {
3449		mlog_errno(ret);
3450		goto out;
3451	}
3452
3453	trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
3454				      cpos, write_len, max_cpos,
3455				      cow_start, cow_len);
3456
3457	BUG_ON(cow_len == 0);
3458
3459	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3460	if (!context) {
3461		ret = -ENOMEM;
3462		mlog_errno(ret);
3463		goto out;
3464	}
3465
3466	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3467				       1, &ref_tree, &ref_root_bh);
3468	if (ret) {
3469		mlog_errno(ret);
3470		goto out;
3471	}
3472
3473	context->inode = inode;
3474	context->cow_start = cow_start;
3475	context->cow_len = cow_len;
3476	context->ref_tree = ref_tree;
3477	context->ref_root_bh = ref_root_bh;
3478	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3479	context->get_clusters = ocfs2_di_get_clusters;
3480
3481	ocfs2_init_dinode_extent_tree(&context->data_et,
3482				      INODE_CACHE(inode), di_bh);
3483
3484	ret = ocfs2_replace_cow(context);
3485	if (ret)
3486		mlog_errno(ret);
3487
3488	/*
3489	 * Truncate the extent map here: whether or not we hit an error
3490	 * during the CoW, we should no longer trust the cached extent
3491	 * map.
3492	 */
3493	ocfs2_extent_map_trunc(inode, cow_start);
3494
3495	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3496	brelse(ref_root_bh);
3497out:
3498	kfree(context);
3499	return ret;
3500}
3501
3502/*
3503 * CoW any and all clusters between cpos and cpos+write_len.
3504 * Don't CoW past max_cpos.  If this returns successfully, all
3505 * clusters between cpos and cpos+write_len are safe to modify.
3506 */
3507int ocfs2_refcount_cow(struct inode *inode,
3508		       struct buffer_head *di_bh,
3509		       u32 cpos, u32 write_len, u32 max_cpos)
3510{
3511	int ret = 0;
3512	u32 p_cluster, num_clusters;
3513	unsigned int ext_flags;
3514
3515	while (write_len) {
3516		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3517					 &num_clusters, &ext_flags);
3518		if (ret) {
3519			mlog_errno(ret);
3520			break;
3521		}
3522
3523		if (write_len < num_clusters)
3524			num_clusters = write_len;
3525
3526		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3527			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3528						      num_clusters, max_cpos);
3529			if (ret) {
3530				mlog_errno(ret);
3531				break;
3532			}
3533		}
3534
3535		write_len -= num_clusters;
3536		cpos += num_clusters;
3537	}
3538
3539	return ret;
3540}
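/*
 * A minimal caller sketch (hypothetical; cluster locking and error handling
 * are the caller's responsibility and are elided):
 *
 *	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 *
 * On success, every cluster in [cpos, cpos + clusters) is exclusively
 * owned and safe to overwrite in place.
 */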
3541
3542static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
3543					  u32 v_cluster, u32 *p_cluster,
3544					  u32 *num_clusters,
3545					  unsigned int *extent_flags)
3546{
3547	struct inode *inode = context->inode;
3548	struct ocfs2_xattr_value_root *xv = context->cow_object;
3549
3550	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
3551					num_clusters, &xv->xr_list,
3552					extent_flags);
3553}
3554
3555/*
3556 * Given an xattr value root, calculate the maximum metadata blocks and
3557 * journal credits we need for the refcount tree change if we truncate it to 0.
3558 */
3559int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
3560				       struct ocfs2_caching_info *ref_ci,
3561				       struct buffer_head *ref_root_bh,
3562				       struct ocfs2_xattr_value_root *xv,
3563				       int *meta_add, int *credits)
3564{
3565	int ret = 0, index, ref_blocks = 0;
3566	u32 p_cluster, num_clusters;
3567	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
3568	struct ocfs2_refcount_block *rb;
3569	struct ocfs2_refcount_rec rec;
3570	struct buffer_head *ref_leaf_bh = NULL;
3571
3572	while (cpos < clusters) {
3573		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
3574					       &num_clusters, &xv->xr_list,
3575					       NULL);
3576		if (ret) {
3577			mlog_errno(ret);
3578			goto out;
3579		}
3580
3581		cpos += num_clusters;
3582
3583		while (num_clusters) {
3584			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
3585						     p_cluster, num_clusters,
3586						     &rec, &index,
3587						     &ref_leaf_bh);
3588			if (ret) {
3589				mlog_errno(ret);
3590				goto out;
3591			}
3592
3593			BUG_ON(!rec.r_refcount);
3594
3595			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
3596
3597			/*
3598			 * We don't really know whether the other clusters are
3599			 * in this refcount block or not, so take the worst
3600			 * case: all the clusters are in this block and each
3601			 * one will split a refcount rec, so in total we need
3602			 * clusters * 2 new refcount recs.
3603			 */
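			/*
			 * E.g. with clusters == 3 the worst case adds
			 * 3 * 2 == 6 records to this leaf; if rl_used
			 * plus that would exceed rl_count, we budget
			 * one more refcount block.
			 */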
3604			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
3605			    le16_to_cpu(rb->rf_records.rl_count))
3606				ref_blocks++;
3607
3608			*credits += 1;
3609			brelse(ref_leaf_bh);
3610			ref_leaf_bh = NULL;
3611
3612			if (num_clusters <= le32_to_cpu(rec.r_clusters))
3613				break;
3614			else
3615				num_clusters -= le32_to_cpu(rec.r_clusters);
3616			p_cluster += num_clusters;
3617		}
3618	}
3619
3620	*meta_add += ref_blocks;
3621	if (!ref_blocks)
3622		goto out;
3623
3624	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
3625	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
3626		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
3627	else {
3628		struct ocfs2_extent_tree et;
3629
3630		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
3631		*credits += ocfs2_calc_extend_credits(inode->i_sb,
3632						      et.et_root_el);
3633	}
3634
3635out:
3636	brelse(ref_leaf_bh);
3637	return ret;
3638}
3639
3640/*
3641 * Do CoW for xattr.
3642 */
3643int ocfs2_refcount_cow_xattr(struct inode *inode,
3644			     struct ocfs2_dinode *di,
3645			     struct ocfs2_xattr_value_buf *vb,
3646			     struct ocfs2_refcount_tree *ref_tree,
3647			     struct buffer_head *ref_root_bh,
3648			     u32 cpos, u32 write_len,
3649			     struct ocfs2_post_refcount *post)
3650{
3651	int ret;
3652	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
3653	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3654	struct ocfs2_cow_context *context = NULL;
3655	u32 cow_start, cow_len;
3656
3657	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
3658
3659	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
3660					      cpos, write_len, UINT_MAX,
3661					      &cow_start, &cow_len);
3662	if (ret) {
3663		mlog_errno(ret);
3664		goto out;
3665	}
3666
3667	BUG_ON(cow_len == 0);
3668
3669	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3670	if (!context) {
3671		ret = -ENOMEM;
3672		mlog_errno(ret);
3673		goto out;
3674	}
3675
3676	context->inode = inode;
3677	context->cow_start = cow_start;
3678	context->cow_len = cow_len;
3679	context->ref_tree = ref_tree;
3680	context->ref_root_bh = ref_root_bh;
3681	context->cow_object = xv;
3682
3683	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
3684	/* We need the extra credits for duplicate_clusters by jbd. */
3685	context->extra_credits =
3686		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
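	/*
	 * That works out to one credit per block copied: each block
	 * duplicated by ocfs2_duplicate_clusters_by_jbd() gets its own
	 * journal_access(CREATE) plus journal_dirty.
	 */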
3687	context->get_clusters = ocfs2_xattr_value_get_clusters;
3688	context->post_refcount = post;
3689
3690	ocfs2_init_xattr_value_extent_tree(&context->data_et,
3691					   INODE_CACHE(inode), vb);
3692
3693	ret = ocfs2_replace_cow(context);
3694	if (ret)
3695		mlog_errno(ret);
3696
3697out:
3698	kfree(context);
3699	return ret;
3700}
3701
3702/*
3703 * Insert a new extent into the refcount tree and mark an extent rec
3704 * as refcounted in the dinode tree.
3705 */
3706int ocfs2_add_refcount_flag(struct inode *inode,
3707			    struct ocfs2_extent_tree *data_et,
3708			    struct ocfs2_caching_info *ref_ci,
3709			    struct buffer_head *ref_root_bh,
3710			    u32 cpos, u32 p_cluster, u32 num_clusters,
3711			    struct ocfs2_cached_dealloc_ctxt *dealloc,
3712			    struct ocfs2_post_refcount *post)
3713{
3714	int ret;
3715	handle_t *handle;
3716	int credits = 1, ref_blocks = 0;
3717	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3718	struct ocfs2_alloc_context *meta_ac = NULL;
3719
3720	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
3721					       ref_ci, ref_root_bh,
3722					       p_cluster, num_clusters,
3723					       &ref_blocks, &credits);
3724	if (ret) {
3725		mlog_errno(ret);
3726		goto out;
3727	}
3728
3729	trace_ocfs2_add_refcount_flag(ref_blocks, credits);
3730
3731	if (ref_blocks) {
3732		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
3733							ref_blocks, &meta_ac);
3734		if (ret) {
3735			mlog_errno(ret);
3736			goto out;
3737		}
3738	}
3739
3740	if (post)
3741		credits += post->credits;
3742
3743	handle = ocfs2_start_trans(osb, credits);
3744	if (IS_ERR(handle)) {
3745		ret = PTR_ERR(handle);
3746		mlog_errno(ret);
3747		goto out;
3748	}
3749
3750	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
3751					   cpos, num_clusters, p_cluster,
3752					   meta_ac, dealloc);
3753	if (ret) {
3754		mlog_errno(ret);
3755		goto out_commit;
3756	}
3757
3758	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3759					p_cluster, num_clusters, 0,
3760					meta_ac, dealloc);
3761	if (ret) {
3762		mlog_errno(ret);
3763		goto out_commit;
3764	}
3765
3766	if (post && post->func) {
3767		ret = post->func(inode, handle, post->para);
3768		if (ret)
3769			mlog_errno(ret);
3770	}
3771
3772out_commit:
3773	ocfs2_commit_trans(osb, handle);
3774out:
3775	if (meta_ac)
3776		ocfs2_free_alloc_context(meta_ac);
3777	return ret;
3778}
3779
3780static int ocfs2_change_ctime(struct inode *inode,
3781			      struct buffer_head *di_bh)
3782{
3783	int ret;
3784	handle_t *handle;
3785	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3786
3787	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
3788				   OCFS2_INODE_UPDATE_CREDITS);
3789	if (IS_ERR(handle)) {
3790		ret = PTR_ERR(handle);
3791		mlog_errno(ret);
3792		goto out;
3793	}
3794
3795	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
3796				      OCFS2_JOURNAL_ACCESS_WRITE);
3797	if (ret) {
3798		mlog_errno(ret);
3799		goto out_commit;
3800	}
3801
3802	inode->i_ctime = CURRENT_TIME;
3803	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3804	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3805
3806	ocfs2_journal_dirty(handle, di_bh);
3807
3808out_commit:
3809	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
3810out:
3811	return ret;
3812}
3813
3814static int ocfs2_attach_refcount_tree(struct inode *inode,
3815				      struct buffer_head *di_bh)
3816{
3817	int ret, data_changed = 0;
3818	struct buffer_head *ref_root_bh = NULL;
3819	struct ocfs2_inode_info *oi = OCFS2_I(inode);
3820	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3821	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3822	struct ocfs2_refcount_tree *ref_tree;
3823	unsigned int ext_flags;
3824	loff_t size;
3825	u32 cpos, num_clusters, clusters, p_cluster;
3826	struct ocfs2_cached_dealloc_ctxt dealloc;
3827	struct ocfs2_extent_tree di_et;
3828
3829	ocfs2_init_dealloc_ctxt(&dealloc);
3830
3831	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
3832		ret = ocfs2_create_refcount_tree(inode, di_bh);
3833		if (ret) {
3834			mlog_errno(ret);
3835			goto out;
3836		}
3837	}
3838
3839	BUG_ON(!di->i_refcount_loc);
3840	ret = ocfs2_lock_refcount_tree(osb,
3841				       le64_to_cpu(di->i_refcount_loc), 1,
3842				       &ref_tree, &ref_root_bh);
3843	if (ret) {
3844		mlog_errno(ret);
3845		goto out;
3846	}
3847
3848	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3849		goto attach_xattr;
3850
3851	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3852
3853	size = i_size_read(inode);
3854	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
3855
3856	cpos = 0;
3857	while (cpos < clusters) {
3858		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3859					 &num_clusters, &ext_flags);
3860		if (ret) {
3861			mlog_errno(ret);
3862			goto unlock;
3863		}
3864		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
3865			ret = ocfs2_add_refcount_flag(inode, &di_et,
3866						      &ref_tree->rf_ci,
3867						      ref_root_bh, cpos,
3868						      p_cluster, num_clusters,
3869						      &dealloc, NULL);
3870			if (ret) {
3871				mlog_errno(ret);
3872				goto unlock;
3873			}
3874
3875			data_changed = 1;
3876		}
3877		cpos += num_clusters;
3878	}
3879
3880attach_xattr:
3881	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3882		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3883						       &ref_tree->rf_ci,
3884						       ref_root_bh,
3885						       &dealloc);
3886		if (ret) {
3887			mlog_errno(ret);
3888			goto unlock;
3889		}
3890	}
3891
3892	if (data_changed) {
3893		ret = ocfs2_change_ctime(inode, di_bh);
3894		if (ret)
3895			mlog_errno(ret);
3896	}
3897
3898unlock:
3899	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3900	brelse(ref_root_bh);
3901
3902	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
3903		ocfs2_schedule_truncate_log_flush(osb, 1);
3904		ocfs2_run_deallocs(osb, &dealloc);
3905	}
3906out:
3907	/*
3908	 * Empty the extent map so that we may get the right extent
3909	 * record from the disk.
3910	 */
3911	ocfs2_extent_map_trunc(inode, 0);
3912
3913	return ret;
3914}
3915
3916static int ocfs2_add_refcounted_extent(struct inode *inode,
3917				   struct ocfs2_extent_tree *et,
3918				   struct ocfs2_caching_info *ref_ci,
3919				   struct buffer_head *ref_root_bh,
3920				   u32 cpos, u32 p_cluster, u32 num_clusters,
3921				   unsigned int ext_flags,
3922				   struct ocfs2_cached_dealloc_ctxt *dealloc)
3923{
3924	int ret;
3925	handle_t *handle;
3926	int credits = 0;
3927	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3928	struct ocfs2_alloc_context *meta_ac = NULL;
3929
3930	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
3931					     p_cluster, num_clusters,
3932					     et, ref_ci,
3933					     ref_root_bh, &meta_ac,
3934					     NULL, &credits);
3935	if (ret) {
3936		mlog_errno(ret);
3937		goto out;
3938	}
3939
3940	handle = ocfs2_start_trans(osb, credits);
3941	if (IS_ERR(handle)) {
3942		ret = PTR_ERR(handle);
3943		mlog_errno(ret);
3944		goto out;
3945	}
3946
3947	ret = ocfs2_insert_extent(handle, et, cpos,
3948			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
3949			num_clusters, ext_flags, meta_ac);
3950	if (ret) {
3951		mlog_errno(ret);
3952		goto out_commit;
3953	}
3954
3955	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3956				      p_cluster, num_clusters,
3957				      meta_ac, dealloc);
3958	if (ret)
3959		mlog_errno(ret);
3960
3961out_commit:
3962	ocfs2_commit_trans(osb, handle);
3963out:
3964	if (meta_ac)
3965		ocfs2_free_alloc_context(meta_ac);
3966	return ret;
3967}
3968
3969static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3970				       struct buffer_head *s_bh,
3971				       struct inode *t_inode,
3972				       struct buffer_head *t_bh)
3973{
3974	int ret;
3975	handle_t *handle;
3976	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3977	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3978	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3979
3980	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3981
3982	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3983	if (IS_ERR(handle)) {
3984		ret = PTR_ERR(handle);
3985		mlog_errno(ret);
3986		goto out;
3987	}
3988
3989	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3990				      OCFS2_JOURNAL_ACCESS_WRITE);
3991	if (ret) {
3992		mlog_errno(ret);
3993		goto out_commit;
3994	}
3995
3996	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3997	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3998	       le16_to_cpu(s_di->id2.i_data.id_count));
3999	spin_lock(&OCFS2_I(t_inode)->ip_lock);
4000	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
4001	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
4002	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
4003
4004	ocfs2_journal_dirty(handle, t_bh);
4005
4006out_commit:
4007	ocfs2_commit_trans(osb, handle);
4008out:
4009	return ret;
4010}
4011
4012static int ocfs2_duplicate_extent_list(struct inode *s_inode,
4013				struct inode *t_inode,
4014				struct buffer_head *t_bh,
4015				struct ocfs2_caching_info *ref_ci,
4016				struct buffer_head *ref_root_bh,
4017				struct ocfs2_cached_dealloc_ctxt *dealloc)
4018{
4019	int ret = 0;
4020	u32 p_cluster, num_clusters, clusters, cpos;
4021	loff_t size;
4022	unsigned int ext_flags;
4023	struct ocfs2_extent_tree et;
4024
4025	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
4026
4027	size = i_size_read(s_inode);
4028	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
4029
4030	cpos = 0;
4031	while (cpos < clusters) {
4032		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
4033					 &num_clusters, &ext_flags);
4034		if (ret) {
4035			mlog_errno(ret);
4036			goto out;
4037		}
4038		if (p_cluster) {
4039			ret = ocfs2_add_refcounted_extent(t_inode, &et,
4040							  ref_ci, ref_root_bh,
4041							  cpos, p_cluster,
4042							  num_clusters,
4043							  ext_flags,
4044							  dealloc);
4045			if (ret) {
4046				mlog_errno(ret);
4047				goto out;
4048			}
4049		}
4050
4051		cpos += num_clusters;
4052	}
4053
4054out:
4055	return ret;
4056}
4057
4058/*
4059 * Change the new file's attributes to match the source.
4060 *
4061 * reflink creates a snapshot of a file, which means the attributes
4062 * must be identical except for three: nlink, ino, and ctime.
4063 */
4064static int ocfs2_complete_reflink(struct inode *s_inode,
4065				  struct buffer_head *s_bh,
4066				  struct inode *t_inode,
4067				  struct buffer_head *t_bh,
4068				  bool preserve)
4069{
4070	int ret;
4071	handle_t *handle;
4072	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
4073	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
4074	loff_t size = i_size_read(s_inode);
4075
4076	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
4077				   OCFS2_INODE_UPDATE_CREDITS);
4078	if (IS_ERR(handle)) {
4079		ret = PTR_ERR(handle);
4080		mlog_errno(ret);
4081		return ret;
4082	}
4083
4084	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
4085				      OCFS2_JOURNAL_ACCESS_WRITE);
4086	if (ret) {
4087		mlog_errno(ret);
4088		goto out_commit;
4089	}
4090
4091	spin_lock(&OCFS2_I(t_inode)->ip_lock);
4092	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
4093	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
4094	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
4095	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
4096	i_size_write(t_inode, size);
4097	t_inode->i_blocks = s_inode->i_blocks;
4098
4099	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
4100	di->i_clusters = s_di->i_clusters;
4101	di->i_size = s_di->i_size;
4102	di->i_dyn_features = s_di->i_dyn_features;
4103	di->i_attr = s_di->i_attr;
4104
4105	if (preserve) {
4106		t_inode->i_uid = s_inode->i_uid;
4107		t_inode->i_gid = s_inode->i_gid;
4108		t_inode->i_mode = s_inode->i_mode;
4109		di->i_uid = s_di->i_uid;
4110		di->i_gid = s_di->i_gid;
4111		di->i_mode = s_di->i_mode;
4112
4113		/*
4114		 * Update the times.
4115		 * We want mtime to appear identical to the source,
4116		 * while ctime is updated.
4117		 */
4118		t_inode->i_ctime = CURRENT_TIME;
4119
4120		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
4121		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
4122
4123		t_inode->i_mtime = s_inode->i_mtime;
4124		di->i_mtime = s_di->i_mtime;
4125		di->i_mtime_nsec = s_di->i_mtime_nsec;
4126	}
4127
4128	ocfs2_journal_dirty(handle, t_bh);
4129
4130out_commit:
4131	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
4132	return ret;
4133}
4134
4135static int ocfs2_create_reflink_node(struct inode *s_inode,
4136				     struct buffer_head *s_bh,
4137				     struct inode *t_inode,
4138				     struct buffer_head *t_bh,
4139				     bool preserve)
4140{
4141	int ret;
4142	struct buffer_head *ref_root_bh = NULL;
4143	struct ocfs2_cached_dealloc_ctxt dealloc;
4144	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
4145	struct ocfs2_refcount_block *rb;
4146	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
4147	struct ocfs2_refcount_tree *ref_tree;
4148
4149	ocfs2_init_dealloc_ctxt(&dealloc);
4150
4151	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
4152				      le64_to_cpu(di->i_refcount_loc));
4153	if (ret) {
4154		mlog_errno(ret);
4155		goto out;
4156	}
4157
4158	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4159		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4160						  t_inode, t_bh);
4161		if (ret)
4162			mlog_errno(ret);
4163		goto out;
4164	}
4165
4166	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4167				       1, &ref_tree, &ref_root_bh);
4168	if (ret) {
4169		mlog_errno(ret);
4170		goto out;
4171	}
4172	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
4173
4174	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
4175					  &ref_tree->rf_ci, ref_root_bh,
4176					  &dealloc);
4177	if (ret) {
4178		mlog_errno(ret);
4179		goto out_unlock_refcount;
4180	}
4181
4182out_unlock_refcount:
4183	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4184	brelse(ref_root_bh);
4185out:
4186	if (ocfs2_dealloc_has_cluster(&dealloc)) {
4187		ocfs2_schedule_truncate_log_flush(osb, 1);
4188		ocfs2_run_deallocs(osb, &dealloc);
4189	}
4190
4191	return ret;
4192}
4193
4194static int __ocfs2_reflink(struct dentry *old_dentry,
4195			   struct buffer_head *old_bh,
4196			   struct inode *new_inode,
4197			   bool preserve)
4198{
4199	int ret;
4200	struct inode *inode = old_dentry->d_inode;
4201	struct buffer_head *new_bh = NULL;
4202
4203	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
4204		ret = -EINVAL;
4205		mlog_errno(ret);
4206		goto out;
4207	}
4208
4209	ret = filemap_fdatawrite(inode->i_mapping);
4210	if (ret) {
4211		mlog_errno(ret);
4212		goto out;
4213	}
4214
4215	ret = ocfs2_attach_refcount_tree(inode, old_bh);
4216	if (ret) {
4217		mlog_errno(ret);
4218		goto out;
4219	}
4220
4221	mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
4222	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4223				      OI_LS_REFLINK_TARGET);
4224	if (ret) {
4225		mlog_errno(ret);
4226		goto out_unlock;
4227	}
4228
4229	ret = ocfs2_create_reflink_node(inode, old_bh,
4230					new_inode, new_bh, preserve);
4231	if (ret) {
4232		mlog_errno(ret);
4233		goto inode_unlock;
4234	}
4235
4236	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
4237		ret = ocfs2_reflink_xattrs(inode, old_bh,
4238					   new_inode, new_bh,
4239					   preserve);
4240		if (ret) {
4241			mlog_errno(ret);
4242			goto inode_unlock;
4243		}
4244	}
4245
4246	ret = ocfs2_complete_reflink(inode, old_bh,
4247				     new_inode, new_bh, preserve);
4248	if (ret)
4249		mlog_errno(ret);
4250
4251inode_unlock:
4252	ocfs2_inode_unlock(new_inode, 1);
4253	brelse(new_bh);
4254out_unlock:
4255	mutex_unlock(&new_inode->i_mutex);
4256out:
4257	if (!ret) {
4258		ret = filemap_fdatawait(inode->i_mapping);
4259		if (ret)
4260			mlog_errno(ret);
4261	}
4262	return ret;
4263}
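/*
 * Lock ordering note: __ocfs2_reflink() expects the caller to already hold
 * the source inode's cluster lock and its ip_xattr_sem/ip_alloc_sem (see
 * ocfs2_reflink() below), while it takes the target inode's i_mutex (nested
 * as I_MUTEX_CHILD) and the target's cluster lock itself.
 */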
4264
4265static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4266			 struct dentry *new_dentry, bool preserve)
4267{
4268	int error;
4269	struct inode *inode = old_dentry->d_inode;
4270	struct buffer_head *old_bh = NULL;
4271	struct inode *new_orphan_inode = NULL;
4272	struct posix_acl *default_acl, *acl;
4273	umode_t mode;
4274
4275	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4276		return -EOPNOTSUPP;
4277
4278	mode = inode->i_mode;
4279	error = posix_acl_create(dir, &mode, &default_acl, &acl);
4280	if (error) {
4281		mlog_errno(error);
4282		goto out;
4283	}
4284
4285	error = ocfs2_create_inode_in_orphan(dir, mode,
4286					     &new_orphan_inode);
4287	if (error) {
4288		mlog_errno(error);
4289		goto out;
4290	}
4291
4292	error = ocfs2_inode_lock(inode, &old_bh, 1);
4293	if (error) {
4294		mlog_errno(error);
4295		goto out;
4296	}
4297
4298	down_write(&OCFS2_I(inode)->ip_xattr_sem);
4299	down_write(&OCFS2_I(inode)->ip_alloc_sem);
4300	error = __ocfs2_reflink(old_dentry, old_bh,
4301				new_orphan_inode, preserve);
4302	up_write(&OCFS2_I(inode)->ip_alloc_sem);
4303	up_write(&OCFS2_I(inode)->ip_xattr_sem);
4304
4305	ocfs2_inode_unlock(inode, 1);
4306	brelse(old_bh);
4307
4308	if (error) {
4309		mlog_errno(error);
4310		goto out;
4311	}
4312
4313	/* If the security attributes aren't preserved, we need to re-initialize them. */
4314	if (!preserve) {
4315		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
4316						    &new_dentry->d_name,
4317						    default_acl, acl);
4318		if (error)
4319			mlog_errno(error);
4320	}
4321out:
4322	if (default_acl)
4323		posix_acl_release(default_acl);
4324	if (acl)
4325		posix_acl_release(acl);
4326	if (!error) {
4327		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
4328						       new_dentry);
4329		if (error)
4330			mlog_errno(error);
4331	}
4332
4333	if (new_orphan_inode) {
4334		/*
4335		 * We need to open_unlock the inode whether we succeed
4336		 * or not, so that other nodes can delete it later.
4337		 */
4338		ocfs2_open_unlock(new_orphan_inode);
4339		if (error)
4340			iput(new_orphan_inode);
4341	}
4342
4343	return error;
4344}
4345
4346/*
4347 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
4348 * sys_reflink().  This will go away when vfs_reflink() exists in
4349 * fs/namei.c.
4350 */
4351
4352/* copied from may_create in VFS. */
4353static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
4354{
4355	if (child->d_inode)
4356		return -EEXIST;
4357	if (IS_DEADDIR(dir))
4358		return -ENOENT;
4359	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
4360}
4361
4362/**
4363 * ocfs2_vfs_reflink - Create a reference-counted link
4364 *
4365 * @old_dentry:        source dentry + inode
4366 * @dir:       directory to create the target
4367 * @new_dentry:        target dentry
4368 * @preserve:  if true, preserve all file attributes
4369 */
4370static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
4371			     struct dentry *new_dentry, bool preserve)
4372{
4373	struct inode *inode = old_dentry->d_inode;
4374	int error;
4375
4376	if (!inode)
4377		return -ENOENT;
4378
4379	error = ocfs2_may_create(dir, new_dentry);
4380	if (error)
4381		return error;
4382
4383	if (dir->i_sb != inode->i_sb)
4384		return -EXDEV;
4385
4386	/*
4387	 * A reflink to an append-only or immutable file cannot be created.
4388	 */
4389	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4390		return -EPERM;
4391
4392	/* Only regular files can be reflinked. */
4393	if (!S_ISREG(inode->i_mode))
4394		return -EPERM;
4395
4396	/*
4397	 * If the caller wants to preserve ownership, they must have the
4398	 * rights to do so.
4399	 */
4400	if (preserve) {
4401		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
4402			return -EPERM;
4403		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
4404			return -EPERM;
4405	}
4406
4407	/*
4408	 * If the caller is modifying any aspect of the attributes, they
4409	 * are not creating a snapshot.  They need read permission on the
4410	 * file.
4411	 */
4412	if (!preserve) {
4413		error = inode_permission(inode, MAY_READ);
4414		if (error)
4415			return error;
4416	}
4417
4418	mutex_lock(&inode->i_mutex);
4419	dquot_initialize(dir);
4420	error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
4421	mutex_unlock(&inode->i_mutex);
4422	if (!error)
4423		fsnotify_create(dir, new_dentry);
4424	return error;
4425}
4426/*
4427 * Most of this code is copied from sys_linkat.
4428 */
4429int ocfs2_reflink_ioctl(struct inode *inode,
4430			const char __user *oldname,
4431			const char __user *newname,
4432			bool preserve)
4433{
4434	struct dentry *new_dentry;
4435	struct path old_path, new_path;
4436	int error;
4437
4438	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
4439		return -EOPNOTSUPP;
4440
4441	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
4442	if (error) {
4443		mlog_errno(error);
4444		return error;
4445	}
4446
4447	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
4448	error = PTR_ERR(new_dentry);
4449	if (IS_ERR(new_dentry)) {
4450		mlog_errno(error);
4451		goto out;
4452	}
4453
4454	error = -EXDEV;
4455	if (old_path.mnt != new_path.mnt) {
4456		mlog_errno(error);
4457		goto out_dput;
4458	}
4459
4460	error = ocfs2_vfs_reflink(old_path.dentry,
4461				  new_path.dentry->d_inode,
4462				  new_dentry, preserve);
4463out_dput:
4464	done_path_create(&new_path, new_dentry);
4465out:
4466	path_put(&old_path);
4467
4468	return error;
4469}