v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * move_extents.c
   4 *
   5 * Copyright (C) 2011 Oracle.  All rights reserved.
   6 */
   7#include <linux/fs.h>
   8#include <linux/types.h>
   9#include <linux/mount.h>
  10#include <linux/swap.h>
  11
  12#include <cluster/masklog.h>
  13
  14#include "ocfs2.h"
  15#include "ocfs2_ioctl.h"
  16
  17#include "alloc.h"
  18#include "localalloc.h"
  19#include "aops.h"
  20#include "dlmglue.h"
  21#include "extent_map.h"
  22#include "inode.h"
  23#include "journal.h"
  24#include "suballoc.h"
  25#include "uptodate.h"
  26#include "super.h"
  27#include "dir.h"
  28#include "buffer_head_io.h"
  29#include "sysfile.h"
  30#include "refcounttree.h"
  31#include "move_extents.h"
  32
  33struct ocfs2_move_extents_context {
  34	struct inode *inode;
  35	struct file *file;
  36	int auto_defrag;
  37	int partial;
  38	int credits;
  39	u32 new_phys_cpos;
  40	u32 clusters_moved;
  41	u64 refcount_loc;
  42	struct ocfs2_move_extents *range;
  43	struct ocfs2_extent_tree et;
  44	struct ocfs2_alloc_context *meta_ac;
  45	struct ocfs2_alloc_context *data_ac;
  46	struct ocfs2_cached_dealloc_ctxt dealloc;
  47};
  48
  49static int __ocfs2_move_extent(handle_t *handle,
  50			       struct ocfs2_move_extents_context *context,
  51			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
  52			       int ext_flags)
  53{
  54	int ret = 0, index;
  55	struct inode *inode = context->inode;
  56	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  57	struct ocfs2_extent_rec *rec, replace_rec;
  58	struct ocfs2_path *path = NULL;
  59	struct ocfs2_extent_list *el;
  60	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
  61	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
  62
  63	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
  64					       p_cpos, new_p_cpos, len);
  65	if (ret) {
  66		mlog_errno(ret);
  67		goto out;
  68	}
  69
  70	memset(&replace_rec, 0, sizeof(replace_rec));
  71	replace_rec.e_cpos = cpu_to_le32(cpos);
  72	replace_rec.e_leaf_clusters = cpu_to_le16(len);
  73	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
  74								   new_p_cpos));
  75
  76	path = ocfs2_new_path_from_et(&context->et);
  77	if (!path) {
  78		ret = -ENOMEM;
  79		mlog_errno(ret);
  80		goto out;
  81	}
  82
  83	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
  84	if (ret) {
  85		mlog_errno(ret);
  86		goto out;
  87	}
  88
  89	el = path_leaf_el(path);
  90
  91	index = ocfs2_search_extent_list(el, cpos);
  92	if (index == -1) {
  93		ret = ocfs2_error(inode->i_sb,
  94				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
  95				  (unsigned long long)ino, cpos);
  96		goto out;
  97	}
  98
  99	rec = &el->l_recs[index];
 100
 101	BUG_ON(ext_flags != rec->e_flags);
 102	/*
 103	 * after moving/defraging to new location, the extent is not going
 104	 * to be refcounted anymore.
 105	 */
 106	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
 107
 108	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
 109				      context->et.et_root_bh,
 110				      OCFS2_JOURNAL_ACCESS_WRITE);
 111	if (ret) {
 112		mlog_errno(ret);
 113		goto out;
 114	}
 115
 116	ret = ocfs2_split_extent(handle, &context->et, path, index,
 117				 &replace_rec, context->meta_ac,
 118				 &context->dealloc);
 119	if (ret) {
 120		mlog_errno(ret);
 121		goto out;
 122	}
 123
 124	ocfs2_journal_dirty(handle, context->et.et_root_bh);
 125
 126	context->new_phys_cpos = new_p_cpos;
 127
 128	/*
 129	 * need I to append truncate log for old clusters?
 130	 */
 131	if (old_blkno) {
 132		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 133			ret = ocfs2_decrease_refcount(inode, handle,
 134					ocfs2_blocks_to_clusters(osb->sb,
 135								 old_blkno),
 136					len, context->meta_ac,
 137					&context->dealloc, 1);
 138		else
 139			ret = ocfs2_truncate_log_append(osb, handle,
 140							old_blkno, len);
 141	}
 142
 143	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 144out:
 145	ocfs2_free_path(path);
 146	return ret;
 147}
 148
 149/*
 150 * lock allocator, and reserve appropriate number of bits for
 151 * meta blocks.
 152 */
 153static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
 154					struct ocfs2_extent_tree *et,
 155					u32 clusters_to_move,
 156					u32 extents_to_split,
 157					struct ocfs2_alloc_context **meta_ac,
 158					int extra_blocks,
 159					int *credits)
 160{
 161	int ret, num_free_extents;
 162	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
 163	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 164
 165	num_free_extents = ocfs2_num_free_extents(et);
 166	if (num_free_extents < 0) {
 167		ret = num_free_extents;
 168		mlog_errno(ret);
 169		goto out;
 170	}
 171
 172	if (!num_free_extents ||
 173	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
 174		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
 175
 176	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
 177	if (ret) {
 178		mlog_errno(ret);
 179		goto out;
 180	}
 181
 182
 183	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 184
 185	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
 186	     extra_blocks, clusters_to_move, *credits);
 187out:
 188	if (ret) {
 189		if (*meta_ac) {
 190			ocfs2_free_alloc_context(*meta_ac);
 191			*meta_ac = NULL;
 192		}
 193	}
 194
 195	return ret;
 196}
 197
 198/*
 199 * Using one journal handle to guarantee the data consistency in case
 200 * crash happens anywhere.
 201 *
 202 *  XXX: defrag can end up with finishing partial extent as requested,
 203 * due to not enough contiguous clusters can be found in allocator.
 204 */
 205static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 206			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
 207{
 208	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
 209	handle_t *handle;
 210	struct inode *inode = context->inode;
 211	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 212	struct inode *tl_inode = osb->osb_tl_inode;
 213	struct ocfs2_refcount_tree *ref_tree = NULL;
 214	u32 new_phys_cpos, new_len;
 215	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
 216	int need_free = 0;
 217
 218	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
 219		BUG_ON(!ocfs2_is_refcount_inode(inode));
 220		BUG_ON(!context->refcount_loc);
 221
 222		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
 223					       &ref_tree, NULL);
 224		if (ret) {
 225			mlog_errno(ret);
 226			return ret;
 227		}
 228
 229		ret = ocfs2_prepare_refcount_change_for_del(inode,
 230							context->refcount_loc,
 231							phys_blkno,
 232							*len,
 233							&credits,
 234							&extra_blocks);
 235		if (ret) {
 236			mlog_errno(ret);
 237			goto out;
 238		}
 239	}
 240
 241	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
 242						*len, 1,
 243						&context->meta_ac,
 244						extra_blocks, &credits);
 245	if (ret) {
 246		mlog_errno(ret);
 247		goto out;
 248	}
 249
 250	/*
 251	 * should be using allocation reservation strategy there?
 252	 *
 253	 * if (context->data_ac)
 254	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
 255	 */
 256
 257	inode_lock(tl_inode);
 258
 259	if (ocfs2_truncate_log_needs_flush(osb)) {
 260		ret = __ocfs2_flush_truncate_log(osb);
 261		if (ret < 0) {
 262			mlog_errno(ret);
 263			goto out_unlock_mutex;
 264		}
 265	}
 266
 267	/*
 268	 * Make sure ocfs2_reserve_cluster is called after
 269	 * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
 270	 *
 271	 * If ocfs2_reserve_cluster is called
 272	 * before __ocfs2_flush_truncate_log, dead lock on global bitmap
 273	 * may happen.
 274	 *
 275	 */
 276	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
 277	if (ret) {
 278		mlog_errno(ret);
 279		goto out_unlock_mutex;
 280	}
 281
 282	handle = ocfs2_start_trans(osb, credits);
 283	if (IS_ERR(handle)) {
 284		ret = PTR_ERR(handle);
 285		mlog_errno(ret);
 286		goto out_unlock_mutex;
 287	}
 288
 289	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
 290				     &new_phys_cpos, &new_len);
 291	if (ret) {
 292		mlog_errno(ret);
 293		goto out_commit;
 294	}
 295
 296	/*
 297	 * allowing partial extent moving is kind of 'pros and cons', it makes
 298	 * whole defragmentation less likely to fail, on the contrary, the bad
 299	 * thing is it may make the fs even more fragmented after moving, let
 300	 * userspace make a good decision here.
 301	 */
 302	if (new_len != *len) {
 303		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
 304		if (!partial) {
 305			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
 306			ret = -ENOSPC;
 307			need_free = 1;
 308			goto out_commit;
 309		}
 310	}
 311
 312	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
 313	     phys_cpos, new_phys_cpos);
 314
 315	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
 316				  new_phys_cpos, ext_flags);
 317	if (ret)
 318		mlog_errno(ret);
 319
 320	if (partial && (new_len != *len))
 321		*len = new_len;
 322
 323	/*
 324	 * Here we should write the new page out first if we are
 325	 * in write-back mode.
 326	 */
 327	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
 328	if (ret)
 329		mlog_errno(ret);
 330
 331out_commit:
 332	if (need_free && context->data_ac) {
 333		struct ocfs2_alloc_context *data_ac = context->data_ac;
 334
 335		if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
 336			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
 337					new_phys_cpos, new_len);
 338		else
 339			ocfs2_free_clusters(handle,
 340					data_ac->ac_inode,
 341					data_ac->ac_bh,
 342					ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
 343					new_len);
 344	}
 345
 346	ocfs2_commit_trans(osb, handle);
 347
 348out_unlock_mutex:
 349	inode_unlock(tl_inode);
 350
 351	if (context->data_ac) {
 352		ocfs2_free_alloc_context(context->data_ac);
 353		context->data_ac = NULL;
 354	}
 355
 356	if (context->meta_ac) {
 357		ocfs2_free_alloc_context(context->meta_ac);
 358		context->meta_ac = NULL;
 359	}
 360
 361out:
 362	if (ref_tree)
 363		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 364
 365	return ret;
 366}
 367
 368/*
 369 * find the victim alloc group, where #blkno fits.
 370 */
 371static int ocfs2_find_victim_alloc_group(struct inode *inode,
 372					 u64 vict_blkno,
 373					 int type, int slot,
 374					 int *vict_bit,
 375					 struct buffer_head **ret_bh)
 376{
 377	int ret, i, bits_per_unit = 0;
 378	u64 blkno;
 379	char namebuf[40];
 380
 381	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 382	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
 383	struct ocfs2_chain_list *cl;
 384	struct ocfs2_chain_rec *rec;
 385	struct ocfs2_dinode *ac_dinode;
 386	struct ocfs2_group_desc *bg;
 387
 388	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
 389	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
 390					 strlen(namebuf), &blkno);
 391	if (ret) {
 392		ret = -ENOENT;
 393		goto out;
 394	}
 395
 396	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
 397	if (ret) {
 398		mlog_errno(ret);
 399		goto out;
 400	}
 401
 402	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
 403	cl = &(ac_dinode->id2.i_chain);
 404	rec = &(cl->cl_recs[0]);
 405
 406	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
 407		bits_per_unit = osb->s_clustersize_bits -
 408					inode->i_sb->s_blocksize_bits;
 409	/*
 410	 * 'vict_blkno' was out of the valid range.
 411	 */
 412	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
 413	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
 414				bits_per_unit))) {
 415		ret = -EINVAL;
 416		goto out;
 417	}
 418
 419	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
 420
 421		rec = &(cl->cl_recs[i]);
 422		if (!rec)
 423			continue;
 424
 425		bg = NULL;
 426
 427		do {
 428			if (!bg)
 429				blkno = le64_to_cpu(rec->c_blkno);
 430			else
 431				blkno = le64_to_cpu(bg->bg_next_group);
 432
 433			if (gd_bh) {
 434				brelse(gd_bh);
 435				gd_bh = NULL;
 436			}
 437
 438			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
 439			if (ret) {
 440				mlog_errno(ret);
 441				goto out;
 442			}
 443
 444			bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 445
 446			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
 447						le16_to_cpu(bg->bg_bits))) {
 448
 449				*ret_bh = gd_bh;
 450				*vict_bit = (vict_blkno - blkno) >>
 451							bits_per_unit;
 452				mlog(0, "find the victim group: #%llu, "
 453				     "total_bits: %u, vict_bit: %u\n",
 454				     blkno, le16_to_cpu(bg->bg_bits),
 455				     *vict_bit);
 456				goto out;
 457			}
 458
 459		} while (le64_to_cpu(bg->bg_next_group));
 460	}
 461
 462	ret = -EINVAL;
 463out:
 464	brelse(ac_bh);
 465
 466	/*
 467	 * caller has to release the gd_bh properly.
 468	 */
 469	return ret;
 470}
 471
 472/*
 473 * XXX: helper to validate and adjust moving goal.
 474 */
 475static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
 476					       struct ocfs2_move_extents *range)
 477{
 478	int ret, goal_bit = 0;
 479
 480	struct buffer_head *gd_bh = NULL;
 481	struct ocfs2_group_desc *bg;
 482	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 483	int c_to_b = 1 << (osb->s_clustersize_bits -
 484					inode->i_sb->s_blocksize_bits);
 485
 486	/*
 487	 * make goal become cluster aligned.
 488	 */
 489	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
 490						      range->me_goal);
 491	/*
 492	 * validate goal sits within global_bitmap, and return the victim
 493	 * group desc
 494	 */
 495	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
 496					    GLOBAL_BITMAP_SYSTEM_INODE,
 497					    OCFS2_INVALID_SLOT,
 498					    &goal_bit, &gd_bh);
 499	if (ret)
 500		goto out;
 501
 502	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 503
 504	/*
 505	 * moving goal is not allowed to start with a group desc block (#0 blk)
 506	 * let's compromise to the latter cluster.
 507	 */
 508	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
 509		range->me_goal += c_to_b;
 510
 511	/*
 512	 * movement is not gonna cross two groups.
 513	 */
 514	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
 515								range->me_len) {
 516		ret = -EINVAL;
 517		goto out;
 518	}
 519	/*
 520	 * more exact validations/adjustments will be performed later during
 521	 * moving operation for each extent range.
 522	 */
 523	mlog(0, "extents get ready to be moved to #%llu block\n",
 524	     range->me_goal);
 525
 526out:
 527	brelse(gd_bh);
 528
 529	return ret;
 530}
 531
 532static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
 533				    int *goal_bit, u32 move_len, u32 max_hop,
 534				    u32 *phys_cpos)
 535{
 536	int i, used, last_free_bits = 0, base_bit = *goal_bit;
 537	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 538	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
 539						 le64_to_cpu(gd->bg_blkno));
 540
 541	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
 542
 543		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
 544		if (used) {
 545			/*
 546			 * we even tried searching the free chunk by jumping
 547			 * a 'max_hop' distance, but still failed.
 548			 */
 549			if ((i - base_bit) > max_hop) {
 550				*phys_cpos = 0;
 551				break;
 552			}
 553
 554			if (last_free_bits)
 555				last_free_bits = 0;
 556
 557			continue;
 558		} else
 559			last_free_bits++;
 560
 561		if (last_free_bits == move_len) {
 562			*goal_bit = i;
 563			*phys_cpos = base_cpos + i;
 564			break;
 565		}
 566	}
 567
 568	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
 569}
 570
 571static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 572			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
 573			     u32 len, int ext_flags)
 574{
 575	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
 576	handle_t *handle;
 577	struct inode *inode = context->inode;
 578	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 579	struct inode *tl_inode = osb->osb_tl_inode;
 580	struct inode *gb_inode = NULL;
 581	struct buffer_head *gb_bh = NULL;
 582	struct buffer_head *gd_bh = NULL;
 583	struct ocfs2_group_desc *gd;
 584	struct ocfs2_refcount_tree *ref_tree = NULL;
 585	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
 586						    context->range->me_threshold);
 587	u64 phys_blkno, new_phys_blkno;
 588
 589	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
 590
 591	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
 592		BUG_ON(!ocfs2_is_refcount_inode(inode));
 593		BUG_ON(!context->refcount_loc);
 594
 595		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
 596					       &ref_tree, NULL);
 597		if (ret) {
 598			mlog_errno(ret);
 599			return ret;
 600		}
 601
 602		ret = ocfs2_prepare_refcount_change_for_del(inode,
 603							context->refcount_loc,
 604							phys_blkno,
 605							len,
 606							&credits,
 607							&extra_blocks);
 608		if (ret) {
 609			mlog_errno(ret);
 610			goto out;
 611		}
 612	}
 613
 614	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
 615						len, 1,
 616						&context->meta_ac,
 617						extra_blocks, &credits);
 618	if (ret) {
 619		mlog_errno(ret);
 620		goto out;
 621	}
 622
 623	/*
 624	 * need to count 2 extra credits for global_bitmap inode and
 625	 * group descriptor.
 626	 */
 627	credits += OCFS2_INODE_UPDATE_CREDITS + 1;
 628
 629	/*
 630	 * ocfs2_move_extent() didn't reserve any clusters in lock_allocators()
 631	 * logic, while we still need to lock the global_bitmap.
 632	 */
 633	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
 634					       OCFS2_INVALID_SLOT);
 635	if (!gb_inode) {
 636		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
 637		ret = -EIO;
 638		goto out;
 639	}
 640
 641	inode_lock(gb_inode);
 642
 643	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
 644	if (ret) {
 645		mlog_errno(ret);
 646		goto out_unlock_gb_mutex;
 647	}
 648
 649	inode_lock(tl_inode);
 650
 651	handle = ocfs2_start_trans(osb, credits);
 652	if (IS_ERR(handle)) {
 653		ret = PTR_ERR(handle);
 654		mlog_errno(ret);
 655		goto out_unlock_tl_inode;
 656	}
 657
 658	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
 659	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
 660					    GLOBAL_BITMAP_SYSTEM_INODE,
 661					    OCFS2_INVALID_SLOT,
 662					    &goal_bit, &gd_bh);
 663	if (ret) {
 664		mlog_errno(ret);
 665		goto out_commit;
 666	}
 667
 668	/*
 669	 * probe the victim cluster group to find a proper
 670	 * region to fit wanted movement, it even will perform
 671	 * a best-effort attempt by compromising to a threshold
 672	 * around the goal.
 673	 */
 674	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
 675				new_phys_cpos);
 676	if (!*new_phys_cpos) {
 677		ret = -ENOSPC;
 678		goto out_commit;
 679	}
 680
 681	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
 682				  *new_phys_cpos, ext_flags);
 683	if (ret) {
 684		mlog_errno(ret);
 685		goto out_commit;
 686	}
 687
 688	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
 689	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
 690					       le16_to_cpu(gd->bg_chain));
 691	if (ret) {
 692		mlog_errno(ret);
 693		goto out_commit;
 694	}
 695
 696	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
 697					 goal_bit, len);
 698	if (ret) {
 699		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
 700					       le16_to_cpu(gd->bg_chain));
 701		mlog_errno(ret);
 702	}
 703
 704	/*
 705	 * Here we should write the new page out first if we are
 706	 * in write-back mode.
 707	 */
 708	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
 709	if (ret)
 710		mlog_errno(ret);
 711
 712out_commit:
 713	ocfs2_commit_trans(osb, handle);
 714	brelse(gd_bh);
 715
 716out_unlock_tl_inode:
 717	inode_unlock(tl_inode);
 718
 719	ocfs2_inode_unlock(gb_inode, 1);
 720out_unlock_gb_mutex:
 721	inode_unlock(gb_inode);
 722	brelse(gb_bh);
 723	iput(gb_inode);
 724
 725out:
 726	if (context->meta_ac) {
 727		ocfs2_free_alloc_context(context->meta_ac);
 728		context->meta_ac = NULL;
 729	}
 730
 731	if (ref_tree)
 732		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 733
 734	return ret;
 735}
 736
 737/*
 738 * Helper to calculate the defraging length in one run according to threshold.
 739 */
 740static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
 741					 u32 threshold, int *skip)
 742{
 743	if ((*alloc_size + *len_defraged) < threshold) {
 744		/*
 745		 * proceed defragmentation until we meet the thresh
 746		 */
 747		*len_defraged += *alloc_size;
 748	} else if (*len_defraged == 0) {
 749		/*
 750		 * XXX: skip a large extent.
 751		 */
 752		*skip = 1;
 753	} else {
 754		/*
 755		 * split this extent to coalesce with former pieces as
 756		 * to reach the threshold.
 757		 *
 758		 * we're done here with one cycle of defragmentation
 759		 * in a size of 'thresh', resetting 'len_defraged'
 760		 * forces a new defragmentation.
 761		 */
 762		*alloc_size = threshold - *len_defraged;
 763		*len_defraged = 0;
 764	}
 765}
 766
 767static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
 768				struct ocfs2_move_extents_context *context)
 769{
 770	int ret = 0, flags, do_defrag, skip = 0;
 771	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
 772	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
 773
 774	struct inode *inode = context->inode;
 775	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 776	struct ocfs2_move_extents *range = context->range;
 777	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 778
 779	if ((i_size_read(inode) == 0) || (range->me_len == 0))
 780		return 0;
 781
 782	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 783		return 0;
 784
 785	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
 786
 787	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
 788	ocfs2_init_dealloc_ctxt(&context->dealloc);
 789
 790	/*
 791	 * TO-DO XXX:
 792	 *
 793	 * - xattr extents.
 794	 */
 795
 796	do_defrag = context->auto_defrag;
 797
 798	/*
 799	 * extents moving happens in unit of clusters, for the sake
 800	 * of simplicity, we may ignore two clusters where 'byte_start'
 801	 * and 'byte_start + len' were within.
 802	 */
 803	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
 804	len_to_move = (range->me_start + range->me_len) >>
 805						osb->s_clustersize_bits;
 806	if (len_to_move >= move_start)
 807		len_to_move -= move_start;
 808	else
 809		len_to_move = 0;
 810
 811	if (do_defrag) {
 812		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
 813		if (defrag_thresh <= 1)
 814			goto done;
 815	} else
 816		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
 817							 range->me_goal);
 818
 819	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
 820	     "thresh: %u\n",
 821	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
 822	     (unsigned long long)range->me_start,
 823	     (unsigned long long)range->me_len,
 824	     move_start, len_to_move, defrag_thresh);
 825
 826	cpos = move_start;
 827	while (len_to_move) {
 828		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
 829					 &flags);
 830		if (ret) {
 831			mlog_errno(ret);
 832			goto out;
 833		}
 834
 835		if (alloc_size > len_to_move)
 836			alloc_size = len_to_move;
 837
 838		/*
 839		 * XXX: how to deal with a hole:
 840		 *
 841		 * - skip the hole of course
 842		 * - force a new defragmentation
 843		 */
 844		if (!phys_cpos) {
 845			if (do_defrag)
 846				len_defraged = 0;
 847
 848			goto next;
 849		}
 850
 851		if (do_defrag) {
 852			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
 853						     defrag_thresh, &skip);
 854			/*
 855			 * skip large extents
 856			 */
 857			if (skip) {
 858				skip = 0;
 859				goto next;
 860			}
 861
 862			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
 863			     "alloc_size: %u, len_defraged: %u\n",
 864			     cpos, phys_cpos, alloc_size, len_defraged);
 865
 866			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
 867						  &alloc_size, flags);
 868		} else {
 869			ret = ocfs2_move_extent(context, cpos, phys_cpos,
 870						&new_phys_cpos, alloc_size,
 871						flags);
 872
 873			new_phys_cpos += alloc_size;
 874		}
 875
 876		if (ret < 0) {
 877			mlog_errno(ret);
 878			goto out;
 879		}
 880
 881		context->clusters_moved += alloc_size;
 882next:
 883		cpos += alloc_size;
 884		len_to_move -= alloc_size;
 885	}
 886
 887done:
 888	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
 889
 890out:
 891	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
 892						      context->clusters_moved);
 893	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
 894						       context->new_phys_cpos);
 895
 896	ocfs2_schedule_truncate_log_flush(osb, 1);
 897	ocfs2_run_deallocs(osb, &context->dealloc);
 898
 899	return ret;
 900}
 901
 902static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
 903{
 904	int status;
 905	handle_t *handle;
 906	struct inode *inode = context->inode;
 907	struct ocfs2_dinode *di;
 908	struct buffer_head *di_bh = NULL;
 909	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 910
 911	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 912		return -EROFS;
 913
 914	inode_lock(inode);
 915
 916	/*
 917	 * This prevents concurrent writes from other nodes
 918	 */
 919	status = ocfs2_rw_lock(inode, 1);
 920	if (status) {
 921		mlog_errno(status);
 922		goto out;
 923	}
 924
 925	status = ocfs2_inode_lock(inode, &di_bh, 1);
 926	if (status) {
 927		mlog_errno(status);
 928		goto out_rw_unlock;
 929	}
 930
 931	/*
 932	 * remember ip_xattr_sem also needs to be held if necessary
 933	 */
 934	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 935
 936	status = __ocfs2_move_extents_range(di_bh, context);
 937
 938	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 939	if (status) {
 940		mlog_errno(status);
 941		goto out_inode_unlock;
 942	}
 943
 944	/*
 945	 * We update ctime for these changes
 946	 */
 947	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 948	if (IS_ERR(handle)) {
 949		status = PTR_ERR(handle);
 950		mlog_errno(status);
 951		goto out_inode_unlock;
 952	}
 953
 954	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 955					 OCFS2_JOURNAL_ACCESS_WRITE);
 956	if (status) {
 957		mlog_errno(status);
 958		goto out_commit;
 959	}
 960
 961	di = (struct ocfs2_dinode *)di_bh->b_data;
 962	inode->i_ctime = current_time(inode);
 963	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 964	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 965	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 966
 967	ocfs2_journal_dirty(handle, di_bh);
 968
 969out_commit:
 970	ocfs2_commit_trans(osb, handle);
 971
 972out_inode_unlock:
 973	brelse(di_bh);
 974	ocfs2_inode_unlock(inode, 1);
 975out_rw_unlock:
 976	ocfs2_rw_unlock(inode, 1);
 977out:
 978	inode_unlock(inode);
 979
 980	return status;
 981}
 982
 983int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 984{
 985	int status;
 986
 987	struct inode *inode = file_inode(filp);
 988	struct ocfs2_move_extents range;
 989	struct ocfs2_move_extents_context *context;
 990
 991	if (!argp)
 992		return -EINVAL;
 993
 994	status = mnt_want_write_file(filp);
 995	if (status)
 996		return status;
 997
 998	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
 999		status = -EPERM;
1000		goto out_drop;
1001	}
1002
1003	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1004		status = -EPERM;
1005		goto out_drop;
1006	}
1007
1008	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
1009	if (!context) {
1010		status = -ENOMEM;
1011		mlog_errno(status);
1012		goto out_drop;
1013	}
1014
1015	context->inode = inode;
1016	context->file = filp;
1017
1018	if (copy_from_user(&range, argp, sizeof(range))) {
1019		status = -EFAULT;
1020		goto out_free;
1021	}
1022
1023	if (range.me_start > i_size_read(inode)) {
1024		status = -EINVAL;
1025		goto out_free;
1026	}
1027
1028	if (range.me_start + range.me_len > i_size_read(inode))
1029			range.me_len = i_size_read(inode) - range.me_start;
1030
1031	context->range = &range;
1032
1033	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
1034		context->auto_defrag = 1;
1035		/*
1036		 * ok, the default threshold for the defragmentation
1037		 * is 1M, since our maximum clustersize was 1M also.
1038		 * any thought?
1039		 */
1040		if (!range.me_threshold)
1041			range.me_threshold = 1024 * 1024;
1042
1043		if (range.me_threshold > i_size_read(inode))
1044			range.me_threshold = i_size_read(inode);
1045
1046		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
1047			context->partial = 1;
1048	} else {
1049		/*
1050		 * first best-effort attempt to validate and adjust the goal
1051		 * (physical address in block), while it can't guarantee later
1052		 * operation can succeed all the time since global_bitmap may
1053		 * change a bit over time.
1054		 */
1055
1056		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
1057		if (status)
1058			goto out_copy;
1059	}
1060
1061	status = ocfs2_move_extents(context);
1062	if (status)
1063		mlog_errno(status);
1064out_copy:
1065	/*
1066	 * movement/defragmentation may end up being partially completed,
1067	 * that's the reason why we need to return userspace the finished
1068	 * length and new_offset even if failure happens somewhere.
1069	 */
1070	if (copy_to_user(argp, &range, sizeof(range)))
1071		status = -EFAULT;
1072
1073out_free:
1074	kfree(context);
1075out_drop:
1076	mnt_drop_write_file(filp);
1077
1078	return status;
1079}
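
Below is a minimal userspace sketch of driving the auto-defrag path of ocfs2_ioctl_move_extents() in the v6.2 listing above. It is a hedged example, not part of this file: it assumes struct ocfs2_move_extents, the OCFS2_MOVE_EXT_FL_* flags and the OCFS2_IOC_MOVE_EXT request number are available from a local copy of fs/ocfs2/ocfs2_ioctl.h (as ocfs2-tools keeps), and the file argument and byte sizes are made-up illustration values.

/*
 * Hedged sketch: exercise the OCFS2_MOVE_EXT_FL_AUTO_DEFRAG path of
 * ocfs2_ioctl_move_extents().  "ocfs2_ioctl.h" is assumed to be a local
 * copy of fs/ocfs2/ocfs2_ioctl.h providing struct ocfs2_move_extents,
 * the OCFS2_MOVE_EXT_FL_* flags and OCFS2_IOC_MOVE_EXT.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "ocfs2_ioctl.h"	/* assumption: local copy of the kernel header */

int main(int argc, char **argv)
{
	struct ocfs2_move_extents me;
	int fd, ret;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-ocfs2>\n", argv[0]);
		return 1;
	}

	/* the ioctl requires a regular file opened for writing (FMODE_WRITE) */
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&me, 0, sizeof(me));
	me.me_start = 0;			/* byte offset into the file */
	me.me_len = 16 * 1024 * 1024;		/* example: defrag the first 16MB */
	me.me_threshold = 0;			/* 0 lets the kernel default this to 1MB */
	me.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG |
		      OCFS2_MOVE_EXT_FL_PART_DEFRAG;	/* tolerate partially moved extents */

	ret = ioctl(fd, OCFS2_IOC_MOVE_EXT, &me);

	/* me_moved_len is copied back even on failure, reflecting partial progress */
	printf("ret=%d moved=%llu bytes complete=%d\n", ret,
	       (unsigned long long)me.me_moved_len,
	       !!(me.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE));

	close(fd);
	return ret ? 1 : 0;
}
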
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * move_extents.c
   4 *
   5 * Copyright (C) 2011 Oracle.  All rights reserved.
   6 */
   7#include <linux/fs.h>
   8#include <linux/types.h>
   9#include <linux/mount.h>
  10#include <linux/swap.h>
  11
  12#include <cluster/masklog.h>
  13
  14#include "ocfs2.h"
  15#include "ocfs2_ioctl.h"
  16
  17#include "alloc.h"
  18#include "localalloc.h"
  19#include "aops.h"
  20#include "dlmglue.h"
  21#include "extent_map.h"
  22#include "inode.h"
  23#include "journal.h"
  24#include "suballoc.h"
  25#include "uptodate.h"
  26#include "super.h"
  27#include "dir.h"
  28#include "buffer_head_io.h"
  29#include "sysfile.h"
  30#include "refcounttree.h"
  31#include "move_extents.h"
  32
  33struct ocfs2_move_extents_context {
  34	struct inode *inode;
  35	struct file *file;
  36	int auto_defrag;
  37	int partial;
  38	int credits;
  39	u32 new_phys_cpos;
  40	u32 clusters_moved;
  41	u64 refcount_loc;
  42	struct ocfs2_move_extents *range;
  43	struct ocfs2_extent_tree et;
  44	struct ocfs2_alloc_context *meta_ac;
  45	struct ocfs2_alloc_context *data_ac;
  46	struct ocfs2_cached_dealloc_ctxt dealloc;
  47};
  48
  49static int __ocfs2_move_extent(handle_t *handle,
  50			       struct ocfs2_move_extents_context *context,
  51			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
  52			       int ext_flags)
  53{
  54	int ret = 0, index;
  55	struct inode *inode = context->inode;
  56	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  57	struct ocfs2_extent_rec *rec, replace_rec;
  58	struct ocfs2_path *path = NULL;
  59	struct ocfs2_extent_list *el;
  60	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
  61	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
  62
  63	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
  64					       p_cpos, new_p_cpos, len);
  65	if (ret) {
  66		mlog_errno(ret);
  67		goto out;
  68	}
  69
  70	memset(&replace_rec, 0, sizeof(replace_rec));
  71	replace_rec.e_cpos = cpu_to_le32(cpos);
  72	replace_rec.e_leaf_clusters = cpu_to_le16(len);
  73	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
  74								   new_p_cpos));
  75
  76	path = ocfs2_new_path_from_et(&context->et);
  77	if (!path) {
  78		ret = -ENOMEM;
  79		mlog_errno(ret);
  80		goto out;
  81	}
  82
  83	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
  84	if (ret) {
  85		mlog_errno(ret);
  86		goto out;
  87	}
  88
  89	el = path_leaf_el(path);
  90
  91	index = ocfs2_search_extent_list(el, cpos);
  92	if (index == -1) {
  93		ret = ocfs2_error(inode->i_sb,
  94				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
  95				  (unsigned long long)ino, cpos);
  96		goto out;
  97	}
  98
  99	rec = &el->l_recs[index];
 100
 101	BUG_ON(ext_flags != rec->e_flags);
 102	/*
 103	 * after moving/defraging to new location, the extent is not going
 104	 * to be refcounted anymore.
 105	 */
 106	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
 107
 108	ret = ocfs2_split_extent(handle, &context->et, path, index,
 109				 &replace_rec, context->meta_ac,
 110				 &context->dealloc);
 111	if (ret) {
 112		mlog_errno(ret);
 113		goto out;
 114	}
 115
 116	context->new_phys_cpos = new_p_cpos;
 117
 118	/*
 119	 * need I to append truncate log for old clusters?
 120	 */
 121	if (old_blkno) {
 122		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 123			ret = ocfs2_decrease_refcount(inode, handle,
 124					ocfs2_blocks_to_clusters(osb->sb,
 125								 old_blkno),
 126					len, context->meta_ac,
 127					&context->dealloc, 1);
 128		else
 129			ret = ocfs2_truncate_log_append(osb, handle,
 130							old_blkno, len);
 131	}
 132
 133	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 134out:
 135	ocfs2_free_path(path);
 136	return ret;
 137}
 138
 139/*
 140 * lock allocator, and reserve appropriate number of bits for
 141 * meta blocks.
 142 */
 143static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
 144					struct ocfs2_extent_tree *et,
 145					u32 clusters_to_move,
 146					u32 extents_to_split,
 147					struct ocfs2_alloc_context **meta_ac,
 148					int extra_blocks,
 149					int *credits)
 150{
 151	int ret, num_free_extents;
 152	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
 153	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 154
 155	num_free_extents = ocfs2_num_free_extents(et);
 156	if (num_free_extents < 0) {
 157		ret = num_free_extents;
 158		mlog_errno(ret);
 159		goto out;
 160	}
 161
 162	if (!num_free_extents ||
 163	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
 164		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
 165
 166	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
 167	if (ret) {
 168		mlog_errno(ret);
 169		goto out;
 170	}
 171
 172
 173	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 174
 175	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
 176	     extra_blocks, clusters_to_move, *credits);
 177out:
 178	if (ret) {
 179		if (*meta_ac) {
 180			ocfs2_free_alloc_context(*meta_ac);
 181			*meta_ac = NULL;
 182		}
 183	}
 184
 185	return ret;
 186}
 187
 188/*
 189 * Using one journal handle to guarantee the data consistency in case
 190 * crash happens anywhere.
 191 *
 192 *  XXX: defrag can end up with finishing partial extent as requested,
 193 * due to not enough contiguous clusters can be found in allocator.
 194 */
 195static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 196			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
 197{
 198	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
 199	handle_t *handle;
 200	struct inode *inode = context->inode;
 201	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 202	struct inode *tl_inode = osb->osb_tl_inode;
 203	struct ocfs2_refcount_tree *ref_tree = NULL;
 204	u32 new_phys_cpos, new_len;
 205	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
 206	int need_free = 0;
 207
 208	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
 209		BUG_ON(!ocfs2_is_refcount_inode(inode));
 210		BUG_ON(!context->refcount_loc);
 211
 212		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
 213					       &ref_tree, NULL);
 214		if (ret) {
 215			mlog_errno(ret);
 216			return ret;
 217		}
 218
 219		ret = ocfs2_prepare_refcount_change_for_del(inode,
 220							context->refcount_loc,
 221							phys_blkno,
 222							*len,
 223							&credits,
 224							&extra_blocks);
 225		if (ret) {
 226			mlog_errno(ret);
 227			goto out;
 228		}
 229	}
 230
 231	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
 232						*len, 1,
 233						&context->meta_ac,
 234						extra_blocks, &credits);
 235	if (ret) {
 236		mlog_errno(ret);
 237		goto out;
 238	}
 239
 240	/*
 241	 * should be using allocation reservation strategy there?
 242	 *
 243	 * if (context->data_ac)
 244	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
 245	 */
 246
 247	inode_lock(tl_inode);
 248
 249	if (ocfs2_truncate_log_needs_flush(osb)) {
 250		ret = __ocfs2_flush_truncate_log(osb);
 251		if (ret < 0) {
 252			mlog_errno(ret);
 253			goto out_unlock_mutex;
 254		}
 255	}
 256
 257	/*
 258	 * Make sure ocfs2_reserve_cluster is called after
 259	 * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
 260	 *
 261	 * If ocfs2_reserve_cluster is called
 262	 * before __ocfs2_flush_truncate_log, dead lock on global bitmap
 263	 * may happen.
 264	 *
 265	 */
 266	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
 267	if (ret) {
 268		mlog_errno(ret);
 269		goto out_unlock_mutex;
 270	}
 271
 272	handle = ocfs2_start_trans(osb, credits);
 273	if (IS_ERR(handle)) {
 274		ret = PTR_ERR(handle);
 275		mlog_errno(ret);
 276		goto out_unlock_mutex;
 277	}
 278
 279	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
 280				     &new_phys_cpos, &new_len);
 281	if (ret) {
 282		mlog_errno(ret);
 283		goto out_commit;
 284	}
 285
 286	/*
 287	 * allowing partial extent moving is kind of 'pros and cons', it makes
 288	 * whole defragmentation less likely to fail, on the contrary, the bad
 289	 * thing is it may make the fs even more fragmented after moving, let
 290	 * userspace make a good decision here.
 291	 */
 292	if (new_len != *len) {
 293		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
 294		if (!partial) {
 295			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
 296			ret = -ENOSPC;
 297			need_free = 1;
 298			goto out_commit;
 299		}
 300	}
 301
 302	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
 303	     phys_cpos, new_phys_cpos);
 304
 305	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
 306				  new_phys_cpos, ext_flags);
 307	if (ret)
 308		mlog_errno(ret);
 309
 310	if (partial && (new_len != *len))
 311		*len = new_len;
 312
 313	/*
 314	 * Here we should write the new page out first if we are
 315	 * in write-back mode.
 316	 */
 317	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
 318	if (ret)
 319		mlog_errno(ret);
 320
 321out_commit:
 322	if (need_free && context->data_ac) {
 323		struct ocfs2_alloc_context *data_ac = context->data_ac;
 324
 325		if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
 326			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
 327					new_phys_cpos, new_len);
 328		else
 329			ocfs2_free_clusters(handle,
 330					data_ac->ac_inode,
 331					data_ac->ac_bh,
 332					ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
 333					new_len);
 334	}
 335
 336	ocfs2_commit_trans(osb, handle);
 337
 338out_unlock_mutex:
 339	inode_unlock(tl_inode);
 340
 341	if (context->data_ac) {
 342		ocfs2_free_alloc_context(context->data_ac);
 343		context->data_ac = NULL;
 344	}
 345
 346	if (context->meta_ac) {
 347		ocfs2_free_alloc_context(context->meta_ac);
 348		context->meta_ac = NULL;
 349	}
 350
 351out:
 352	if (ref_tree)
 353		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 354
 355	return ret;
 356}
 357
 358/*
 359 * find the victim alloc group, where #blkno fits.
 360 */
 361static int ocfs2_find_victim_alloc_group(struct inode *inode,
 362					 u64 vict_blkno,
 363					 int type, int slot,
 364					 int *vict_bit,
 365					 struct buffer_head **ret_bh)
 366{
 367	int ret, i, bits_per_unit = 0;
 368	u64 blkno;
 369	char namebuf[40];
 370
 371	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 372	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
 373	struct ocfs2_chain_list *cl;
 374	struct ocfs2_chain_rec *rec;
 375	struct ocfs2_dinode *ac_dinode;
 376	struct ocfs2_group_desc *bg;
 377
 378	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
 379	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
 380					 strlen(namebuf), &blkno);
 381	if (ret) {
 382		ret = -ENOENT;
 383		goto out;
 384	}
 385
 386	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
 387	if (ret) {
 388		mlog_errno(ret);
 389		goto out;
 390	}
 391
 392	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
 393	cl = &(ac_dinode->id2.i_chain);
 394	rec = &(cl->cl_recs[0]);
 395
 396	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
 397		bits_per_unit = osb->s_clustersize_bits -
 398					inode->i_sb->s_blocksize_bits;
 399	/*
 400	 * 'vict_blkno' was out of the valid range.
 401	 */
 402	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
 403	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
 404				bits_per_unit))) {
 405		ret = -EINVAL;
 406		goto out;
 407	}
 408
 409	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
 410
 411		rec = &(cl->cl_recs[i]);
 412		if (!rec)
 413			continue;
 414
 415		bg = NULL;
 416
 417		do {
 418			if (!bg)
 419				blkno = le64_to_cpu(rec->c_blkno);
 420			else
 421				blkno = le64_to_cpu(bg->bg_next_group);
 422
 423			if (gd_bh) {
 424				brelse(gd_bh);
 425				gd_bh = NULL;
 426			}
 427
 428			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
 429			if (ret) {
 430				mlog_errno(ret);
 431				goto out;
 432			}
 433
 434			bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 435
 436			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
 437						(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
 438
 439				*ret_bh = gd_bh;
 440				*vict_bit = (vict_blkno - blkno) >>
 441							bits_per_unit;
 442				mlog(0, "find the victim group: #%llu, "
 443				     "total_bits: %u, vict_bit: %u\n",
 444				     blkno, le16_to_cpu(bg->bg_bits),
 445				     *vict_bit);
 446				goto out;
 447			}
 448
 449		} while (le64_to_cpu(bg->bg_next_group));
 450	}
 451
 452	ret = -EINVAL;
 453out:
 454	brelse(ac_bh);
 455
 456	/*
 457	 * caller has to release the gd_bh properly.
 458	 */
 459	return ret;
 460}
 461
 462/*
 463 * XXX: helper to validate and adjust moving goal.
 464 */
 465static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
 466					       struct ocfs2_move_extents *range)
 467{
 468	int ret, goal_bit = 0;
 469
 470	struct buffer_head *gd_bh = NULL;
 471	struct ocfs2_group_desc *bg;
 472	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 473	int c_to_b = 1 << (osb->s_clustersize_bits -
 474					inode->i_sb->s_blocksize_bits);
 475
 476	/*
 477	 * make goal become cluster aligned.
 478	 */
 479	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
 480						      range->me_goal);
 481	/*
 482	 * validate goal sits within global_bitmap, and return the victim
 483	 * group desc
 484	 */
 485	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
 486					    GLOBAL_BITMAP_SYSTEM_INODE,
 487					    OCFS2_INVALID_SLOT,
 488					    &goal_bit, &gd_bh);
 489	if (ret)
 490		goto out;
 491
 492	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 493
 494	/*
 495	 * moving goal is not allowed to start with a group desc block (#0 blk)
 496	 * let's compromise to the latter cluster.
 497	 */
 498	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
 499		range->me_goal += c_to_b;
 500
 501	/*
 502	 * movement is not gonna cross two groups.
 503	 */
 504	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
 505								range->me_len) {
 506		ret = -EINVAL;
 507		goto out;
 508	}
 509	/*
 510	 * more exact validations/adjustments will be performed later during
 511	 * moving operation for each extent range.
 512	 */
 513	mlog(0, "extents get ready to be moved to #%llu block\n",
 514	     range->me_goal);
 515
 516out:
 517	brelse(gd_bh);
 518
 519	return ret;
 520}
 521
 522static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
 523				    int *goal_bit, u32 move_len, u32 max_hop,
 524				    u32 *phys_cpos)
 525{
 526	int i, used, last_free_bits = 0, base_bit = *goal_bit;
 527	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 528	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
 529						 le64_to_cpu(gd->bg_blkno));
 530
 531	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
 532
 533		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
 534		if (used) {
 535			/*
 536			 * we even tried searching the free chunk by jumping
 537			 * a 'max_hop' distance, but still failed.
 538			 */
 539			if ((i - base_bit) > max_hop) {
 540				*phys_cpos = 0;
 541				break;
 542			}
 543
 544			if (last_free_bits)
 545				last_free_bits = 0;
 546
 547			continue;
 548		} else
 549			last_free_bits++;
 550
 551		if (last_free_bits == move_len) {
 552			i -= move_len;
 553			*goal_bit = i;
 554			*phys_cpos = base_cpos + i;
 555			break;
 556		}
 557	}
 558
 559	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
 560}
 561
 562static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 563			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
 564			     u32 len, int ext_flags)
 565{
 566	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
 567	handle_t *handle;
 568	struct inode *inode = context->inode;
 569	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 570	struct inode *tl_inode = osb->osb_tl_inode;
 571	struct inode *gb_inode = NULL;
 572	struct buffer_head *gb_bh = NULL;
 573	struct buffer_head *gd_bh = NULL;
 574	struct ocfs2_group_desc *gd;
 575	struct ocfs2_refcount_tree *ref_tree = NULL;
 576	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
 577						    context->range->me_threshold);
 578	u64 phys_blkno, new_phys_blkno;
 579
 580	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
 581
 582	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
 583		BUG_ON(!ocfs2_is_refcount_inode(inode));
 584		BUG_ON(!context->refcount_loc);
 585
 586		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
 587					       &ref_tree, NULL);
 588		if (ret) {
 589			mlog_errno(ret);
 590			return ret;
 591		}
 592
 593		ret = ocfs2_prepare_refcount_change_for_del(inode,
 594							context->refcount_loc,
 595							phys_blkno,
 596							len,
 597							&credits,
 598							&extra_blocks);
 599		if (ret) {
 600			mlog_errno(ret);
 601			goto out;
 602		}
 603	}
 604
 605	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
 606						len, 1,
 607						&context->meta_ac,
 608						extra_blocks, &credits);
 609	if (ret) {
 610		mlog_errno(ret);
 611		goto out;
 612	}
 613
 614	/*
 615	 * need to count 2 extra credits for global_bitmap inode and
 616	 * group descriptor.
 617	 */
 618	credits += OCFS2_INODE_UPDATE_CREDITS + 1;
 619
 620	/*
 621	 * ocfs2_move_extent() didn't reserve any clusters in lock_allocators()
 622	 * logic, while we still need to lock the global_bitmap.
 623	 */
 624	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
 625					       OCFS2_INVALID_SLOT);
 626	if (!gb_inode) {
 627		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
 628		ret = -EIO;
 629		goto out;
 630	}
 631
 632	inode_lock(gb_inode);
 633
 634	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
 635	if (ret) {
 636		mlog_errno(ret);
 637		goto out_unlock_gb_mutex;
 638	}
 639
 640	inode_lock(tl_inode);
 641
 642	handle = ocfs2_start_trans(osb, credits);
 643	if (IS_ERR(handle)) {
 644		ret = PTR_ERR(handle);
 645		mlog_errno(ret);
 646		goto out_unlock_tl_inode;
 647	}
 648
 649	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
 650	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
 651					    GLOBAL_BITMAP_SYSTEM_INODE,
 652					    OCFS2_INVALID_SLOT,
 653					    &goal_bit, &gd_bh);
 654	if (ret) {
 655		mlog_errno(ret);
 656		goto out_commit;
 657	}
 658
 659	/*
 660	 * probe the victim cluster group to find a proper
 661	 * region to fit wanted movement, it even will perform
 662	 * a best-effort attempt by compromising to a threshold
 663	 * around the goal.
 664	 */
 665	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
 666				new_phys_cpos);
 667	if (!*new_phys_cpos) {
 668		ret = -ENOSPC;
 669		goto out_commit;
 670	}
 671
 672	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
 673				  *new_phys_cpos, ext_flags);
 674	if (ret) {
 675		mlog_errno(ret);
 676		goto out_commit;
 677	}
 678
 679	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
 680	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
 681					       le16_to_cpu(gd->bg_chain));
 682	if (ret) {
 683		mlog_errno(ret);
 684		goto out_commit;
 685	}
 686
 687	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
 688					 goal_bit, len);
 689	if (ret) {
 690		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
 691					       le16_to_cpu(gd->bg_chain));
 692		mlog_errno(ret);
 693	}
 694
 695	/*
 696	 * Here we should write the new page out first if we are
 697	 * in write-back mode.
 698	 */
 699	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
 700	if (ret)
 701		mlog_errno(ret);
 702
 703out_commit:
 704	ocfs2_commit_trans(osb, handle);
 705	brelse(gd_bh);
 706
 707out_unlock_tl_inode:
 708	inode_unlock(tl_inode);
 709
 710	ocfs2_inode_unlock(gb_inode, 1);
 711out_unlock_gb_mutex:
 712	inode_unlock(gb_inode);
 713	brelse(gb_bh);
 714	iput(gb_inode);
 715
 716out:
 717	if (context->meta_ac) {
 718		ocfs2_free_alloc_context(context->meta_ac);
 719		context->meta_ac = NULL;
 720	}
 721
 722	if (ref_tree)
 723		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
 724
 725	return ret;
 726}
 727
 728/*
 729 * Helper to calculate the defraging length in one run according to threshold.
 730 */
 731static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
 732					 u32 threshold, int *skip)
 733{
 734	if ((*alloc_size + *len_defraged) < threshold) {
 735		/*
 736		 * proceed defragmentation until we meet the thresh
 737		 */
 738		*len_defraged += *alloc_size;
 739	} else if (*len_defraged == 0) {
 740		/*
 741		 * XXX: skip a large extent.
 742		 */
 743		*skip = 1;
 744	} else {
 745		/*
 746		 * split this extent to coalesce with former pieces as
 747		 * to reach the threshold.
 748		 *
 749		 * we're done here with one cycle of defragmentation
 750		 * in a size of 'thresh', resetting 'len_defraged'
 751		 * forces a new defragmentation.
 752		 */
 753		*alloc_size = threshold - *len_defraged;
 754		*len_defraged = 0;
 755	}
 756}
 757
 758static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
 759				struct ocfs2_move_extents_context *context)
 760{
 761	int ret = 0, flags, do_defrag, skip = 0;
 762	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
 763	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
 764
 765	struct inode *inode = context->inode;
 766	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 767	struct ocfs2_move_extents *range = context->range;
 768	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 769
 770	if ((i_size_read(inode) == 0) || (range->me_len == 0))
 771		return 0;
 772
 773	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 774		return 0;
 775
 776	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
 777
 778	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
 779	ocfs2_init_dealloc_ctxt(&context->dealloc);
 780
 781	/*
 782	 * TO-DO XXX:
 783	 *
 784	 * - xattr extents.
 785	 */
 786
 787	do_defrag = context->auto_defrag;
 788
 789	/*
 790	 * extents moving happens in unit of clusters, for the sake
 791	 * of simplicity, we may ignore two clusters where 'byte_start'
 792	 * and 'byte_start + len' were within.
 793	 */
 794	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
 795	len_to_move = (range->me_start + range->me_len) >>
 796						osb->s_clustersize_bits;
 797	if (len_to_move >= move_start)
 798		len_to_move -= move_start;
 799	else
 800		len_to_move = 0;
 801
 802	if (do_defrag) {
 803		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
 804		if (defrag_thresh <= 1)
 805			goto done;
 806	} else
 807		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
 808							 range->me_goal);
 809
 810	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
 811	     "thresh: %u\n",
 812	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
 813	     (unsigned long long)range->me_start,
 814	     (unsigned long long)range->me_len,
 815	     move_start, len_to_move, defrag_thresh);
 816
 817	cpos = move_start;
 818	while (len_to_move) {
 819		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
 820					 &flags);
 821		if (ret) {
 822			mlog_errno(ret);
 823			goto out;
 824		}
 825
 826		if (alloc_size > len_to_move)
 827			alloc_size = len_to_move;
 828
 829		/*
 830		 * XXX: how to deal with a hole:
 831		 *
 832		 * - skip the hole of course
 833		 * - force a new defragmentation
 834		 */
 835		if (!phys_cpos) {
 836			if (do_defrag)
 837				len_defraged = 0;
 838
 839			goto next;
 840		}
 841
 842		if (do_defrag) {
 843			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
 844						     defrag_thresh, &skip);
 845			/*
 846			 * skip large extents
 847			 */
 848			if (skip) {
 849				skip = 0;
 850				goto next;
 851			}
 852
 853			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
 854			     "alloc_size: %u, len_defraged: %u\n",
 855			     cpos, phys_cpos, alloc_size, len_defraged);
 856
 857			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
 858						  &alloc_size, flags);
 859		} else {
 860			ret = ocfs2_move_extent(context, cpos, phys_cpos,
 861						&new_phys_cpos, alloc_size,
 862						flags);
 863
 864			new_phys_cpos += alloc_size;
 865		}
 866
 867		if (ret < 0) {
 868			mlog_errno(ret);
 869			goto out;
 870		}
 871
 872		context->clusters_moved += alloc_size;
 873next:
 874		cpos += alloc_size;
 875		len_to_move -= alloc_size;
 876	}
 877
 878done:
 879	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
 880
 881out:
 882	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
 883						      context->clusters_moved);
 884	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
 885						       context->new_phys_cpos);
 886
 887	ocfs2_schedule_truncate_log_flush(osb, 1);
 888	ocfs2_run_deallocs(osb, &context->dealloc);
 889
 890	return ret;
 891}
 892
 893static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
 894{
 895	int status;
 896	handle_t *handle;
 897	struct inode *inode = context->inode;
 898	struct ocfs2_dinode *di;
 899	struct buffer_head *di_bh = NULL;
 900	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 901
 902	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 903		return -EROFS;
 904
 905	inode_lock(inode);
 906
 907	/*
 908	 * This prevents concurrent writes from other nodes
 909	 */
 910	status = ocfs2_rw_lock(inode, 1);
 911	if (status) {
 912		mlog_errno(status);
 913		goto out;
 914	}
 915
 916	status = ocfs2_inode_lock(inode, &di_bh, 1);
 917	if (status) {
 918		mlog_errno(status);
 919		goto out_rw_unlock;
 920	}
 921
 922	/*
 923	 * remember ip_xattr_sem also needs to be held if necessary
 924	 */
 925	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 926
 927	status = __ocfs2_move_extents_range(di_bh, context);
 928
 929	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 930	if (status) {
 931		mlog_errno(status);
 932		goto out_inode_unlock;
 933	}
 934
 935	/*
 936	 * We update ctime for these changes
 937	 */
 938	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 939	if (IS_ERR(handle)) {
 940		status = PTR_ERR(handle);
 941		mlog_errno(status);
 942		goto out_inode_unlock;
 943	}
 944
 945	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 946					 OCFS2_JOURNAL_ACCESS_WRITE);
 947	if (status) {
 948		mlog_errno(status);
 949		goto out_commit;
 950	}
 951
 952	di = (struct ocfs2_dinode *)di_bh->b_data;
 953	inode_set_ctime_current(inode);
 954	di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
 955	di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
 956	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 957
 958	ocfs2_journal_dirty(handle, di_bh);
 959
 960out_commit:
 961	ocfs2_commit_trans(osb, handle);
 962
 963out_inode_unlock:
 964	brelse(di_bh);
 965	ocfs2_inode_unlock(inode, 1);
 966out_rw_unlock:
 967	ocfs2_rw_unlock(inode, 1);
 968out:
 969	inode_unlock(inode);
 970
 971	return status;
 972}
 973
 974int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 975{
 976	int status;
 977
 978	struct inode *inode = file_inode(filp);
 979	struct ocfs2_move_extents range;
 980	struct ocfs2_move_extents_context *context;
 981
 982	if (!argp)
 983		return -EINVAL;
 984
 985	status = mnt_want_write_file(filp);
 986	if (status)
 987		return status;
 988
 989	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
 990		status = -EPERM;
 991		goto out_drop;
 992	}
 993
 994	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
 995		status = -EPERM;
 996		goto out_drop;
 997	}
 998
 999	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
1000	if (!context) {
1001		status = -ENOMEM;
1002		mlog_errno(status);
1003		goto out_drop;
1004	}
1005
1006	context->inode = inode;
1007	context->file = filp;
1008
1009	if (copy_from_user(&range, argp, sizeof(range))) {
1010		status = -EFAULT;
1011		goto out_free;
1012	}
1013
1014	if (range.me_start > i_size_read(inode)) {
1015		status = -EINVAL;
1016		goto out_free;
1017	}
1018
1019	if (range.me_start + range.me_len > i_size_read(inode))
1020			range.me_len = i_size_read(inode) - range.me_start;
1021
1022	context->range = &range;
1023
1024	/*
1025	 * ok, the default threshold for the defragmentation
1026	 * is 1M, since our maximum clustersize was 1M also.
1027	 * any thought?
1028	 */
1029	if (!range.me_threshold)
1030		range.me_threshold = 1024 * 1024;
1031
1032	if (range.me_threshold > i_size_read(inode))
1033		range.me_threshold = i_size_read(inode);
1034
1035	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
1036		context->auto_defrag = 1;
1037
1038		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
1039			context->partial = 1;
1040	} else {
1041		/*
1042		 * first best-effort attempt to validate and adjust the goal
1043		 * (physical address in block), while it can't guarantee later
1044		 * operation can succeed all the time since global_bitmap may
1045		 * change a bit over time.
1046		 */
1047
1048		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
1049		if (status)
1050			goto out_copy;
1051	}
1052
1053	status = ocfs2_move_extents(context);
1054	if (status)
1055		mlog_errno(status);
1056out_copy:
1057	/*
1058	 * movement/defragmentation may end up being partially completed,
1059	 * that's the reason why we need to return userspace the finished
1060	 * length and new_offset even if failure happens somewhere.
1061	 */
1062	if (copy_to_user(argp, &range, sizeof(range)))
1063		status = -EFAULT;
1064
1065out_free:
1066	kfree(context);
1067out_drop:
1068	mnt_drop_write_file(filp);
1069
1070	return status;
1071}
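
For the non-defrag path (a plain extent move toward a caller-supplied goal), a companion sketch follows; the same caveats apply: the struct, flag and ioctl definitions are assumed to come from a local copy of fs/ocfs2/ocfs2_ioctl.h, move_to_goal() is an illustrative helper, and the sizes are example values only. The kernel cluster-aligns me_goal, validates that it falls inside a global-bitmap group, and uses me_threshold to bound how far from the goal ocfs2_probe_alloc_group() may wander.

/*
 * Hedged sketch: the move-to-goal path (me_flags without
 * OCFS2_MOVE_EXT_FL_AUTO_DEFRAG).  Definitions are assumed to come from
 * a local copy of fs/ocfs2/ocfs2_ioctl.h.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "ocfs2_ioctl.h"	/* assumption: local copy of the kernel header */

static int move_to_goal(int fd, unsigned long long goal)
{
	struct ocfs2_move_extents me;
	int ret;

	memset(&me, 0, sizeof(me));
	me.me_start = 0;		/* logical byte range of the file to move */
	me.me_len = 4 * 1024 * 1024;	/* example: move the first 4MB */
	me.me_goal = goal;		/* physical goal; cluster-aligned by the kernel */
	me.me_threshold = 1024 * 1024;	/* example: bounds the hop away from the goal */
	me.me_flags = 0;		/* no AUTO_DEFRAG: plain extent move */

	ret = ioctl(fd, OCFS2_IOC_MOVE_EXT, &me);
	if (ret)
		perror("OCFS2_IOC_MOVE_EXT");

	/* outputs are filled in even on partial completion */
	printf("moved %llu bytes, new offset %llu, complete=%d\n",
	       (unsigned long long)me.me_moved_len,
	       (unsigned long long)me.me_new_offset,
	       !!(me.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE));
	return ret;
}

int main(int argc, char **argv)
{
	int fd, ret;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <file-on-ocfs2> <goal>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = move_to_goal(fd, strtoull(argv[2], NULL, 0));
	close(fd);
	return ret ? 1 : 0;
}
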