   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* -*- mode: c; c-basic-offset: 8; -*-
   3 * vim: noexpandtab sw=8 ts=8 sts=0:
   4 *
   5 * file.c
   6 *
   7 * File open, close, extend, truncate
   8 *
   9 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
  10 */
  11
  12#include <linux/capability.h>
  13#include <linux/fs.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/highmem.h>
  17#include <linux/pagemap.h>
  18#include <linux/uio.h>
  19#include <linux/sched.h>
  20#include <linux/splice.h>
  21#include <linux/mount.h>
  22#include <linux/writeback.h>
  23#include <linux/falloc.h>
  24#include <linux/quotaops.h>
  25#include <linux/blkdev.h>
  26#include <linux/backing-dev.h>
  27
  28#include <cluster/masklog.h>
  29
  30#include "ocfs2.h"
  31
  32#include "alloc.h"
  33#include "aops.h"
  34#include "dir.h"
  35#include "dlmglue.h"
  36#include "extent_map.h"
  37#include "file.h"
  38#include "sysfile.h"
  39#include "inode.h"
  40#include "ioctl.h"
  41#include "journal.h"
  42#include "locks.h"
  43#include "mmap.h"
  44#include "suballoc.h"
  45#include "super.h"
  46#include "xattr.h"
  47#include "acl.h"
  48#include "quota.h"
  49#include "refcounttree.h"
  50#include "ocfs2_trace.h"
  51
  52#include "buffer_head_io.h"
  53
  54static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  55{
  56	struct ocfs2_file_private *fp;
  57
  58	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  59	if (!fp)
  60		return -ENOMEM;
  61
  62	fp->fp_file = file;
  63	mutex_init(&fp->fp_mutex);
  64	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  65	file->private_data = fp;
  66
  67	return 0;
  68}
  69
  70static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  71{
  72	struct ocfs2_file_private *fp = file->private_data;
  73	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  74
  75	if (fp) {
  76		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  77		ocfs2_lock_res_free(&fp->fp_flock);
  78		kfree(fp);
  79		file->private_data = NULL;
  80	}
  81}
  82
  83static int ocfs2_file_open(struct inode *inode, struct file *file)
  84{
  85	int status;
  86	int mode = file->f_flags;
  87	struct ocfs2_inode_info *oi = OCFS2_I(inode);
  88
  89	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
  90			      (unsigned long long)oi->ip_blkno,
  91			      file->f_path.dentry->d_name.len,
  92			      file->f_path.dentry->d_name.name, mode);
  93
  94	if (file->f_mode & FMODE_WRITE) {
  95		status = dquot_initialize(inode);
  96		if (status)
  97			goto leave;
  98	}
  99
 100	spin_lock(&oi->ip_lock);
 101
 102	/* Check that the inode hasn't been wiped from disk by another
 103	 * node. If it hasn't then we're safe as long as we hold the
 104	 * spin lock until our increment of open count. */
 105	if (oi->ip_flags & OCFS2_INODE_DELETED) {
 106		spin_unlock(&oi->ip_lock);
 107
 108		status = -ENOENT;
 109		goto leave;
 110	}
 111
 112	if (mode & O_DIRECT)
 113		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 114
 115	oi->ip_open_count++;
 116	spin_unlock(&oi->ip_lock);
 117
 118	status = ocfs2_init_file_private(inode, file);
 119	if (status) {
 120		/*
 121		 * We want to set open count back if we're failing the
 122		 * open.
 123		 */
 124		spin_lock(&oi->ip_lock);
 125		oi->ip_open_count--;
 126		spin_unlock(&oi->ip_lock);
 127	}
 128
 129	file->f_mode |= FMODE_NOWAIT;
 130
 131leave:
 132	return status;
 133}
 134
 135static int ocfs2_file_release(struct inode *inode, struct file *file)
 136{
 137	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 138
 139	spin_lock(&oi->ip_lock);
 140	if (!--oi->ip_open_count)
 141		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 142
 143	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 144				 oi->ip_blkno,
 145				 file->f_path.dentry->d_name.len,
 146				 file->f_path.dentry->d_name.name,
 147				 oi->ip_open_count);
 148	spin_unlock(&oi->ip_lock);
 149
 150	ocfs2_free_file_private(inode, file);
 151
 152	return 0;
 153}
 154
 155static int ocfs2_dir_open(struct inode *inode, struct file *file)
 156{
 157	return ocfs2_init_file_private(inode, file);
 158}
 159
 160static int ocfs2_dir_release(struct inode *inode, struct file *file)
 161{
 162	ocfs2_free_file_private(inode, file);
 163	return 0;
 164}
 165
 166static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 167			   int datasync)
 168{
 169	int err = 0;
 170	struct inode *inode = file->f_mapping->host;
 171	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 172	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 173	journal_t *journal = osb->journal->j_journal;
 174	int ret;
 175	tid_t commit_tid;
 176	bool needs_barrier = false;
 177
 178	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 179			      oi->ip_blkno,
 180			      file->f_path.dentry->d_name.len,
 181			      file->f_path.dentry->d_name.name,
 182			      (unsigned long long)datasync);
 183
 184	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 185		return -EROFS;
 186
 187	err = file_write_and_wait_range(file, start, end);
 188	if (err)
 189		return err;
 190
 191	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
 192	if (journal->j_flags & JBD2_BARRIER &&
 193	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 194		needs_barrier = true;
 195	err = jbd2_complete_transaction(journal, commit_tid);
 196	if (needs_barrier) {
 197		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 198		if (!err)
 199			err = ret;
 200	}
 201
 202	if (err)
 203		mlog_errno(err);
 204
 205	return (err < 0) ? -EIO : 0;
 206}
 207
 208int ocfs2_should_update_atime(struct inode *inode,
 209			      struct vfsmount *vfsmnt)
 210{
 211	struct timespec64 now;
 212	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 213
 214	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 215		return 0;
 216
 217	if ((inode->i_flags & S_NOATIME) ||
 218	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
 219		return 0;
 220
 221	/*
 222	 * We can be called with no vfsmnt structure - NFSD will
 223	 * sometimes do this.
 224	 *
 225	 * Note that our action here is different than touch_atime() -
 226	 * if we can't tell whether this is a noatime mount, then we
 227	 * don't know whether to trust the value of s_atime_quantum.
 228	 */
 229	if (vfsmnt == NULL)
 230		return 0;
 231
 232	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 233	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 234		return 0;
 235
 236	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 237		if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
 238		    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
 239			return 1;
 240
 241		return 0;
 242	}
 243
 244	now = current_time(inode);
 245	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
 246		return 0;
 247	else
 248		return 1;
 249}
 250
 251int ocfs2_update_inode_atime(struct inode *inode,
 252			     struct buffer_head *bh)
 253{
 254	int ret;
 255	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 256	handle_t *handle;
 257	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 258
 259	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 260	if (IS_ERR(handle)) {
 261		ret = PTR_ERR(handle);
 262		mlog_errno(ret);
 263		goto out;
 264	}
 265
 266	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 267				      OCFS2_JOURNAL_ACCESS_WRITE);
 268	if (ret) {
 269		mlog_errno(ret);
 270		goto out_commit;
 271	}
 272
 273	/*
 274	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 275	 * have i_mutex to guard against concurrent changes to other
 276	 * inode fields.
 277	 */
 278	inode->i_atime = current_time(inode);
 279	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
 280	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 281	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 282	ocfs2_journal_dirty(handle, bh);
 283
 284out_commit:
 285	ocfs2_commit_trans(osb, handle);
 286out:
 287	return ret;
 288}
 289
 290int ocfs2_set_inode_size(handle_t *handle,
 291				struct inode *inode,
 292				struct buffer_head *fe_bh,
 293				u64 new_i_size)
 294{
 295	int status;
 296
 297	i_size_write(inode, new_i_size);
 298	inode->i_blocks = ocfs2_inode_sector_count(inode);
 299	inode->i_ctime = inode->i_mtime = current_time(inode);
 300
 301	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 302	if (status < 0) {
 303		mlog_errno(status);
 304		goto bail;
 305	}
 306
 307bail:
 308	return status;
 309}
 310
 311int ocfs2_simple_size_update(struct inode *inode,
 312			     struct buffer_head *di_bh,
 313			     u64 new_i_size)
 314{
 315	int ret;
 316	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 317	handle_t *handle = NULL;
 318
 319	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 320	if (IS_ERR(handle)) {
 321		ret = PTR_ERR(handle);
 322		mlog_errno(ret);
 323		goto out;
 324	}
 325
 326	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 327				   new_i_size);
 328	if (ret < 0)
 329		mlog_errno(ret);
 330
 331	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 332	ocfs2_commit_trans(osb, handle);
 333out:
 334	return ret;
 335}
 336
 337static int ocfs2_cow_file_pos(struct inode *inode,
 338			      struct buffer_head *fe_bh,
 339			      u64 offset)
 340{
 341	int status;
 342	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 343	unsigned int num_clusters = 0;
 344	unsigned int ext_flags = 0;
 345
 346	/*
 347	 * If the new offset is aligned to the range of the cluster, there is
 348	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 349	 * CoW either.
 350	 */
 351	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 352		return 0;
 353
 354	status = ocfs2_get_clusters(inode, cpos, &phys,
 355				    &num_clusters, &ext_flags);
 356	if (status) {
 357		mlog_errno(status);
 358		goto out;
 359	}
 360
 361	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 362		goto out;
 363
 364	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 365
 366out:
 367	return status;
 368}
 369
 370static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 371				     struct inode *inode,
 372				     struct buffer_head *fe_bh,
 373				     u64 new_i_size)
 374{
 375	int status;
 376	handle_t *handle;
 377	struct ocfs2_dinode *di;
 378	u64 cluster_bytes;
 379
 380	/*
  381	 * We need to CoW the cluster that contains the offset if it is reflinked
 382	 * since we will call ocfs2_zero_range_for_truncate later which will
 383	 * write "0" from offset to the end of the cluster.
 384	 */
 385	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 386	if (status) {
 387		mlog_errno(status);
 388		return status;
 389	}
 390
 391	/* TODO: This needs to actually orphan the inode in this
 392	 * transaction. */
 393
 394	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 395	if (IS_ERR(handle)) {
 396		status = PTR_ERR(handle);
 397		mlog_errno(status);
 398		goto out;
 399	}
 400
 401	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 402					 OCFS2_JOURNAL_ACCESS_WRITE);
 403	if (status < 0) {
 404		mlog_errno(status);
 405		goto out_commit;
 406	}
 407
 408	/*
 409	 * Do this before setting i_size.
 410	 */
 411	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 412	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 413					       cluster_bytes);
 414	if (status) {
 415		mlog_errno(status);
 416		goto out_commit;
 417	}
 418
 419	i_size_write(inode, new_i_size);
 420	inode->i_ctime = inode->i_mtime = current_time(inode);
 421
 422	di = (struct ocfs2_dinode *) fe_bh->b_data;
 423	di->i_size = cpu_to_le64(new_i_size);
 424	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
 425	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 426	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 427
 428	ocfs2_journal_dirty(handle, fe_bh);
 429
 430out_commit:
 431	ocfs2_commit_trans(osb, handle);
 432out:
 433	return status;
 434}
 435
 436int ocfs2_truncate_file(struct inode *inode,
 437			       struct buffer_head *di_bh,
 438			       u64 new_i_size)
 439{
 440	int status = 0;
 441	struct ocfs2_dinode *fe = NULL;
 442	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 443
 444	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 445	 * already validated it */
 446	fe = (struct ocfs2_dinode *) di_bh->b_data;
 447
 448	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 449				  (unsigned long long)le64_to_cpu(fe->i_size),
 450				  (unsigned long long)new_i_size);
 451
 452	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 453			"Inode %llu, inode i_size = %lld != di "
 454			"i_size = %llu, i_flags = 0x%x\n",
 455			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 456			i_size_read(inode),
 457			(unsigned long long)le64_to_cpu(fe->i_size),
 458			le32_to_cpu(fe->i_flags));
 459
 460	if (new_i_size > le64_to_cpu(fe->i_size)) {
 461		trace_ocfs2_truncate_file_error(
 462			(unsigned long long)le64_to_cpu(fe->i_size),
 463			(unsigned long long)new_i_size);
 464		status = -EINVAL;
 465		mlog_errno(status);
 466		goto bail;
 467	}
 468
 469	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 470
 471	ocfs2_resv_discard(&osb->osb_la_resmap,
 472			   &OCFS2_I(inode)->ip_la_data_resv);
 473
 474	/*
 475	 * The inode lock forced other nodes to sync and drop their
 476	 * pages, which (correctly) happens even if we have a truncate
 477	 * without allocation change - ocfs2 cluster sizes can be much
 478	 * greater than page size, so we have to truncate them
 479	 * anyway.
 480	 */
 481	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 482	truncate_inode_pages(inode->i_mapping, new_i_size);
 483
 484	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 485		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 486					       i_size_read(inode), 1);
 487		if (status)
 488			mlog_errno(status);
 489
 490		goto bail_unlock_sem;
 491	}
 492
 493	/* alright, we're going to need to do a full blown alloc size
 494	 * change. Orphan the inode so that recovery can complete the
 495	 * truncate if necessary. This does the task of marking
 496	 * i_size. */
 497	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 498	if (status < 0) {
 499		mlog_errno(status);
 500		goto bail_unlock_sem;
 501	}
 502
 503	status = ocfs2_commit_truncate(osb, inode, di_bh);
 504	if (status < 0) {
 505		mlog_errno(status);
 506		goto bail_unlock_sem;
 507	}
 508
 509	/* TODO: orphan dir cleanup here. */
 510bail_unlock_sem:
 511	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 512
 513bail:
 514	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 515		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 516
 517	return status;
 518}
 519
 520/*
 521 * extend file allocation only here.
 522 * we'll update all the disk stuff, and oip->alloc_size
 523 *
 524 * expect stuff to be locked, a transaction started and enough data /
 525 * metadata reservations in the contexts.
 526 *
 527 * Will return -EAGAIN, and a reason if a restart is needed.
 528 * If passed in, *reason will always be set, even in error.
 529 */
 530int ocfs2_add_inode_data(struct ocfs2_super *osb,
 531			 struct inode *inode,
 532			 u32 *logical_offset,
 533			 u32 clusters_to_add,
 534			 int mark_unwritten,
 535			 struct buffer_head *fe_bh,
 536			 handle_t *handle,
 537			 struct ocfs2_alloc_context *data_ac,
 538			 struct ocfs2_alloc_context *meta_ac,
 539			 enum ocfs2_alloc_restarted *reason_ret)
 540{
 541	int ret;
 542	struct ocfs2_extent_tree et;
 543
 544	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 545	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 546					  clusters_to_add, mark_unwritten,
 547					  data_ac, meta_ac, reason_ret);
 548
 549	return ret;
 550}
 551
 552static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 553				   u32 clusters_to_add, int mark_unwritten)
 554{
 555	int status = 0;
 556	int restart_func = 0;
 557	int credits;
 558	u32 prev_clusters;
 559	struct buffer_head *bh = NULL;
 560	struct ocfs2_dinode *fe = NULL;
 561	handle_t *handle = NULL;
 562	struct ocfs2_alloc_context *data_ac = NULL;
 563	struct ocfs2_alloc_context *meta_ac = NULL;
 564	enum ocfs2_alloc_restarted why = RESTART_NONE;
 565	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 566	struct ocfs2_extent_tree et;
 567	int did_quota = 0;
 568
 569	/*
 570	 * Unwritten extent only exists for file systems which
 571	 * support holes.
 572	 */
 573	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 574
 575	status = ocfs2_read_inode_block(inode, &bh);
 576	if (status < 0) {
 577		mlog_errno(status);
 578		goto leave;
 579	}
 580	fe = (struct ocfs2_dinode *) bh->b_data;
 581
 582restart_all:
 583	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 584
 585	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 586	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 587				       &data_ac, &meta_ac);
 588	if (status) {
 589		mlog_errno(status);
 590		goto leave;
 591	}
 592
 593	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 594	handle = ocfs2_start_trans(osb, credits);
 595	if (IS_ERR(handle)) {
 596		status = PTR_ERR(handle);
 597		handle = NULL;
 598		mlog_errno(status);
 599		goto leave;
 600	}
 601
 602restarted_transaction:
 603	trace_ocfs2_extend_allocation(
 604		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 605		(unsigned long long)i_size_read(inode),
 606		le32_to_cpu(fe->i_clusters), clusters_to_add,
 607		why, restart_func);
 608
 609	status = dquot_alloc_space_nodirty(inode,
 610			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 611	if (status)
 612		goto leave;
 613	did_quota = 1;
 614
  615	/* reserve a write to the file entry early on - that way if we
 616	 * run out of credits in the allocation path, we can still
 617	 * update i_size. */
 618	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 619					 OCFS2_JOURNAL_ACCESS_WRITE);
 620	if (status < 0) {
 621		mlog_errno(status);
 622		goto leave;
 623	}
 624
 625	prev_clusters = OCFS2_I(inode)->ip_clusters;
 626
 627	status = ocfs2_add_inode_data(osb,
 628				      inode,
 629				      &logical_start,
 630				      clusters_to_add,
 631				      mark_unwritten,
 632				      bh,
 633				      handle,
 634				      data_ac,
 635				      meta_ac,
 636				      &why);
 637	if ((status < 0) && (status != -EAGAIN)) {
 638		if (status != -ENOSPC)
 639			mlog_errno(status);
 640		goto leave;
 641	}
 642	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 643	ocfs2_journal_dirty(handle, bh);
 644
 645	spin_lock(&OCFS2_I(inode)->ip_lock);
 646	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 647	spin_unlock(&OCFS2_I(inode)->ip_lock);
 648	/* Release unused quota reservation */
 649	dquot_free_space(inode,
 650			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 651	did_quota = 0;
 652
 653	if (why != RESTART_NONE && clusters_to_add) {
 654		if (why == RESTART_META) {
 655			restart_func = 1;
 656			status = 0;
 657		} else {
 658			BUG_ON(why != RESTART_TRANS);
 659
 660			status = ocfs2_allocate_extend_trans(handle, 1);
 661			if (status < 0) {
 662				/* handle still has to be committed at
 663				 * this point. */
 664				status = -ENOMEM;
 665				mlog_errno(status);
 666				goto leave;
 667			}
 668			goto restarted_transaction;
 669		}
 670	}
 671
 672	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 673	     le32_to_cpu(fe->i_clusters),
 674	     (unsigned long long)le64_to_cpu(fe->i_size),
 675	     OCFS2_I(inode)->ip_clusters,
 676	     (unsigned long long)i_size_read(inode));
 677
 678leave:
 679	if (status < 0 && did_quota)
 680		dquot_free_space(inode,
 681			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 682	if (handle) {
 683		ocfs2_commit_trans(osb, handle);
 684		handle = NULL;
 685	}
 686	if (data_ac) {
 687		ocfs2_free_alloc_context(data_ac);
 688		data_ac = NULL;
 689	}
 690	if (meta_ac) {
 691		ocfs2_free_alloc_context(meta_ac);
 692		meta_ac = NULL;
 693	}
 694	if ((!status) && restart_func) {
 695		restart_func = 0;
 696		goto restart_all;
 697	}
 698	brelse(bh);
 699	bh = NULL;
 700
 701	return status;
 702}
 703
 704/*
 705 * While a write will already be ordering the data, a truncate will not.
 706 * Thus, we need to explicitly order the zeroed pages.
 707 */
 708static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
 709						      struct buffer_head *di_bh,
 710						      loff_t start_byte,
 711						      loff_t length)
 712{
 713	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 714	handle_t *handle = NULL;
 715	int ret = 0;
 716
 717	if (!ocfs2_should_order_data(inode))
 718		goto out;
 719
 720	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 721	if (IS_ERR(handle)) {
 722		ret = -ENOMEM;
 723		mlog_errno(ret);
 724		goto out;
 725	}
 726
 727	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
 728	if (ret < 0) {
 729		mlog_errno(ret);
 730		goto out;
 731	}
 732
 733	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 734				      OCFS2_JOURNAL_ACCESS_WRITE);
 735	if (ret)
 736		mlog_errno(ret);
 737	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 738
 739out:
 740	if (ret) {
 741		if (!IS_ERR(handle))
 742			ocfs2_commit_trans(osb, handle);
 743		handle = ERR_PTR(ret);
 744	}
 745	return handle;
 746}
 747
 748/* Some parts of this taken from generic_cont_expand, which turned out
 749 * to be too fragile to do exactly what we need without us having to
 750 * worry about recursive locking in ->write_begin() and ->write_end(). */
 751static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 752				 u64 abs_to, struct buffer_head *di_bh)
 753{
 754	struct address_space *mapping = inode->i_mapping;
 755	struct page *page;
 756	unsigned long index = abs_from >> PAGE_SHIFT;
 757	handle_t *handle;
 758	int ret = 0;
 759	unsigned zero_from, zero_to, block_start, block_end;
 760	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 761
 762	BUG_ON(abs_from >= abs_to);
 763	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 764	BUG_ON(abs_from & (inode->i_blkbits - 1));
 765
 766	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
 767						      abs_from,
 768						      abs_to - abs_from);
 769	if (IS_ERR(handle)) {
 770		ret = PTR_ERR(handle);
 771		goto out;
 772	}
 773
 774	page = find_or_create_page(mapping, index, GFP_NOFS);
 775	if (!page) {
 776		ret = -ENOMEM;
 777		mlog_errno(ret);
 778		goto out_commit_trans;
 779	}
 780
 781	/* Get the offsets within the page that we want to zero */
 782	zero_from = abs_from & (PAGE_SIZE - 1);
 783	zero_to = abs_to & (PAGE_SIZE - 1);
 784	if (!zero_to)
 785		zero_to = PAGE_SIZE;
 786
 787	trace_ocfs2_write_zero_page(
 788			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 789			(unsigned long long)abs_from,
 790			(unsigned long long)abs_to,
 791			index, zero_from, zero_to);
 792
 793	/* We know that zero_from is block aligned */
 794	for (block_start = zero_from; block_start < zero_to;
 795	     block_start = block_end) {
 796		block_end = block_start + i_blocksize(inode);
 797
 798		/*
 799		 * block_start is block-aligned.  Bump it by one to force
 800		 * __block_write_begin and block_commit_write to zero the
 801		 * whole block.
 802		 */
 803		ret = __block_write_begin(page, block_start + 1, 0,
 804					  ocfs2_get_block);
 805		if (ret < 0) {
 806			mlog_errno(ret);
 807			goto out_unlock;
 808		}
 809
 810
 811		/* must not update i_size! */
 812		ret = block_commit_write(page, block_start + 1,
 813					 block_start + 1);
 814		if (ret < 0)
 815			mlog_errno(ret);
 816		else
 817			ret = 0;
 818	}
 819
 820	/*
  821	 * fs-writeback will release dirty pages without the page lock if
  822	 * their offsets are beyond the inode size; the release happens in
  823	 * block_write_full_page().
 824	 */
 825	i_size_write(inode, abs_to);
 826	inode->i_blocks = ocfs2_inode_sector_count(inode);
 827	di->i_size = cpu_to_le64((u64)i_size_read(inode));
 828	inode->i_mtime = inode->i_ctime = current_time(inode);
 829	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
 830	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 831	di->i_mtime_nsec = di->i_ctime_nsec;
 832	if (handle) {
 833		ocfs2_journal_dirty(handle, di_bh);
 834		ocfs2_update_inode_fsync_trans(handle, inode, 1);
 835	}
 836
 837out_unlock:
 838	unlock_page(page);
 839	put_page(page);
 840out_commit_trans:
 841	if (handle)
 842		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 843out:
 844	return ret;
 845}
 846
 847/*
 848 * Find the next range to zero.  We do this in terms of bytes because
 849 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 850 * pagecache.  We may return multiple extents.
 851 *
  852 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 853 * needs to be zeroed.  range_start and range_end return the next zeroing
 854 * range.  A subsequent call should pass the previous range_end as its
 855 * zero_start.  If range_end is 0, there's nothing to do.
 856 *
 857 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 858 */
 859static int ocfs2_zero_extend_get_range(struct inode *inode,
 860				       struct buffer_head *di_bh,
 861				       u64 zero_start, u64 zero_end,
 862				       u64 *range_start, u64 *range_end)
 863{
 864	int rc = 0, needs_cow = 0;
 865	u32 p_cpos, zero_clusters = 0;
 866	u32 zero_cpos =
 867		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 868	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 869	unsigned int num_clusters = 0;
 870	unsigned int ext_flags = 0;
 871
 872	while (zero_cpos < last_cpos) {
 873		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 874					&num_clusters, &ext_flags);
 875		if (rc) {
 876			mlog_errno(rc);
 877			goto out;
 878		}
 879
 880		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 881			zero_clusters = num_clusters;
 882			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 883				needs_cow = 1;
 884			break;
 885		}
 886
 887		zero_cpos += num_clusters;
 888	}
 889	if (!zero_clusters) {
 890		*range_end = 0;
 891		goto out;
 892	}
 893
 894	while ((zero_cpos + zero_clusters) < last_cpos) {
 895		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 896					&p_cpos, &num_clusters,
 897					&ext_flags);
 898		if (rc) {
 899			mlog_errno(rc);
 900			goto out;
 901		}
 902
 903		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 904			break;
 905		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 906			needs_cow = 1;
 907		zero_clusters += num_clusters;
 908	}
 909	if ((zero_cpos + zero_clusters) > last_cpos)
 910		zero_clusters = last_cpos - zero_cpos;
 911
 912	if (needs_cow) {
 913		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 914					zero_clusters, UINT_MAX);
 915		if (rc) {
 916			mlog_errno(rc);
 917			goto out;
 918		}
 919	}
 920
 921	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 922	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 923					     zero_cpos + zero_clusters);
 924
 925out:
 926	return rc;
 927}
 928
 929/*
 930 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 931 * has made sure that the entire range needs zeroing.
 932 */
 933static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 934				   u64 range_end, struct buffer_head *di_bh)
 935{
 936	int rc = 0;
 937	u64 next_pos;
 938	u64 zero_pos = range_start;
 939
 940	trace_ocfs2_zero_extend_range(
 941			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 942			(unsigned long long)range_start,
 943			(unsigned long long)range_end);
 944	BUG_ON(range_start >= range_end);
 945
 946	while (zero_pos < range_end) {
 947		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 948		if (next_pos > range_end)
 949			next_pos = range_end;
 950		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 951		if (rc < 0) {
 952			mlog_errno(rc);
 953			break;
 954		}
 955		zero_pos = next_pos;
 956
 957		/*
 958		 * Very large extends have the potential to lock up
 959		 * the cpu for extended periods of time.
 960		 */
 961		cond_resched();
 962	}
 963
 964	return rc;
 965}
 966
 967int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 968		      loff_t zero_to_size)
 969{
 970	int ret = 0;
 971	u64 zero_start, range_start = 0, range_end = 0;
 972	struct super_block *sb = inode->i_sb;
 973
 974	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 975	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 976				(unsigned long long)zero_start,
 977				(unsigned long long)i_size_read(inode));
 978	while (zero_start < zero_to_size) {
 979		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 980						  zero_to_size,
 981						  &range_start,
 982						  &range_end);
 983		if (ret) {
 984			mlog_errno(ret);
 985			break;
 986		}
 987		if (!range_end)
 988			break;
 989		/* Trim the ends */
 990		if (range_start < zero_start)
 991			range_start = zero_start;
 992		if (range_end > zero_to_size)
 993			range_end = zero_to_size;
 994
 995		ret = ocfs2_zero_extend_range(inode, range_start,
 996					      range_end, di_bh);
 997		if (ret) {
 998			mlog_errno(ret);
 999			break;
1000		}
1001		zero_start = range_end;
1002	}
1003
1004	return ret;
1005}
1006
1007int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1008			  u64 new_i_size, u64 zero_to)
1009{
1010	int ret;
1011	u32 clusters_to_add;
1012	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1013
1014	/*
1015	 * Only quota files call this without a bh, and they can't be
1016	 * refcounted.
1017	 */
1018	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1019	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1020
1021	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1022	if (clusters_to_add < oi->ip_clusters)
1023		clusters_to_add = 0;
1024	else
1025		clusters_to_add -= oi->ip_clusters;
1026
1027	if (clusters_to_add) {
1028		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
1029					      clusters_to_add, 0);
1030		if (ret) {
1031			mlog_errno(ret);
1032			goto out;
1033		}
1034	}
1035
1036	/*
1037	 * Call this even if we don't add any clusters to the tree. We
1038	 * still need to zero the area between the old i_size and the
1039	 * new i_size.
1040	 */
1041	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1042	if (ret < 0)
1043		mlog_errno(ret);
1044
1045out:
1046	return ret;
1047}
1048
1049static int ocfs2_extend_file(struct inode *inode,
1050			     struct buffer_head *di_bh,
1051			     u64 new_i_size)
1052{
1053	int ret = 0;
1054	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1055
1056	BUG_ON(!di_bh);
1057
1058	/* setattr sometimes calls us like this. */
1059	if (new_i_size == 0)
1060		goto out;
1061
1062	if (i_size_read(inode) == new_i_size)
1063		goto out;
1064	BUG_ON(new_i_size < i_size_read(inode));
1065
1066	/*
1067	 * The alloc sem blocks people in read/write from reading our
1068	 * allocation until we're done changing it. We depend on
1069	 * i_mutex to block other extend/truncate calls while we're
1070	 * here.  We even have to hold it for sparse files because there
1071	 * might be some tail zeroing.
1072	 */
1073	down_write(&oi->ip_alloc_sem);
1074
1075	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1076		/*
1077		 * We can optimize small extends by keeping the inodes
 1078		 * We can optimize small extends by keeping the inode's
1079		 */
1080		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1081			up_write(&oi->ip_alloc_sem);
1082			goto out_update_size;
1083		}
1084
1085		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1086		if (ret) {
1087			up_write(&oi->ip_alloc_sem);
1088			mlog_errno(ret);
1089			goto out;
1090		}
1091	}
1092
1093	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1094		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1095	else
1096		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1097					    new_i_size);
1098
1099	up_write(&oi->ip_alloc_sem);
1100
1101	if (ret < 0) {
1102		mlog_errno(ret);
1103		goto out;
1104	}
1105
1106out_update_size:
1107	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1108	if (ret < 0)
1109		mlog_errno(ret);
1110
1111out:
1112	return ret;
1113}
1114
1115int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1116{
1117	int status = 0, size_change;
1118	int inode_locked = 0;
1119	struct inode *inode = d_inode(dentry);
1120	struct super_block *sb = inode->i_sb;
1121	struct ocfs2_super *osb = OCFS2_SB(sb);
1122	struct buffer_head *bh = NULL;
1123	handle_t *handle = NULL;
1124	struct dquot *transfer_to[MAXQUOTAS] = { };
1125	int qtype;
1126	int had_lock;
1127	struct ocfs2_lock_holder oh;
1128
1129	trace_ocfs2_setattr(inode, dentry,
1130			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1131			    dentry->d_name.len, dentry->d_name.name,
1132			    attr->ia_valid, attr->ia_mode,
1133			    from_kuid(&init_user_ns, attr->ia_uid),
1134			    from_kgid(&init_user_ns, attr->ia_gid));
1135
1136	/* ensuring we don't even attempt to truncate a symlink */
1137	if (S_ISLNK(inode->i_mode))
1138		attr->ia_valid &= ~ATTR_SIZE;
1139
1140#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1141			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1142	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1143		return 0;
1144
1145	status = setattr_prepare(dentry, attr);
1146	if (status)
1147		return status;
1148
1149	if (is_quota_modification(inode, attr)) {
1150		status = dquot_initialize(inode);
1151		if (status)
1152			return status;
1153	}
1154	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1155	if (size_change) {
1156		/*
 1157		 * Here we should wait for dio to finish before taking the inode lock
1158		 * to avoid a deadlock between ocfs2_setattr() and
1159		 * ocfs2_dio_end_io_write()
1160		 */
1161		inode_dio_wait(inode);
1162
1163		status = ocfs2_rw_lock(inode, 1);
1164		if (status < 0) {
1165			mlog_errno(status);
1166			goto bail;
1167		}
1168	}
1169
1170	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1171	if (had_lock < 0) {
1172		status = had_lock;
1173		goto bail_unlock_rw;
1174	} else if (had_lock) {
1175		/*
1176		 * As far as we know, ocfs2_setattr() could only be the first
1177		 * VFS entry point in the call chain of recursive cluster
1178		 * locking issue.
1179		 *
1180		 * For instance:
1181		 * chmod_common()
1182		 *  notify_change()
1183		 *   ocfs2_setattr()
1184		 *    posix_acl_chmod()
1185		 *     ocfs2_iop_get_acl()
1186		 *
1187		 * But, we're not 100% sure if it's always true, because the
1188		 * ordering of the VFS entry points in the call chain is out
1189		 * of our control. So, we'd better dump the stack here to
1190		 * catch the other cases of recursive locking.
1191		 */
1192		mlog(ML_ERROR, "Another case of recursive locking:\n");
1193		dump_stack();
1194	}
1195	inode_locked = 1;
1196
1197	if (size_change) {
1198		status = inode_newsize_ok(inode, attr->ia_size);
1199		if (status)
1200			goto bail_unlock;
1201
1202		if (i_size_read(inode) >= attr->ia_size) {
1203			if (ocfs2_should_order_data(inode)) {
1204				status = ocfs2_begin_ordered_truncate(inode,
1205								      attr->ia_size);
1206				if (status)
1207					goto bail_unlock;
1208			}
1209			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1210		} else
1211			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1212		if (status < 0) {
1213			if (status != -ENOSPC)
1214				mlog_errno(status);
1215			status = -ENOSPC;
1216			goto bail_unlock;
1217		}
1218	}
1219
1220	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1221	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1222		/*
1223		 * Gather pointers to quota structures so that allocation /
1224		 * freeing of quota structures happens here and not inside
1225		 * dquot_transfer() where we have problems with lock ordering
1226		 */
1227		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1228		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1229		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1230			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1231			if (IS_ERR(transfer_to[USRQUOTA])) {
1232				status = PTR_ERR(transfer_to[USRQUOTA]);
1233				transfer_to[USRQUOTA] = NULL;
1234				goto bail_unlock;
1235			}
1236		}
1237		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1238		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1239		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1240			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1241			if (IS_ERR(transfer_to[GRPQUOTA])) {
1242				status = PTR_ERR(transfer_to[GRPQUOTA]);
1243				transfer_to[GRPQUOTA] = NULL;
1244				goto bail_unlock;
1245			}
1246		}
1247		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1248					   2 * ocfs2_quota_trans_credits(sb));
1249		if (IS_ERR(handle)) {
1250			status = PTR_ERR(handle);
1251			mlog_errno(status);
1252			goto bail_unlock;
1253		}
1254		status = __dquot_transfer(inode, transfer_to);
1255		if (status < 0)
1256			goto bail_commit;
1257	} else {
1258		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1259		if (IS_ERR(handle)) {
1260			status = PTR_ERR(handle);
1261			mlog_errno(status);
1262			goto bail_unlock;
1263		}
1264	}
1265
1266	setattr_copy(inode, attr);
1267	mark_inode_dirty(inode);
1268
1269	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1270	if (status < 0)
1271		mlog_errno(status);
1272
1273bail_commit:
1274	ocfs2_commit_trans(osb, handle);
1275bail_unlock:
1276	if (status && inode_locked) {
1277		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1278		inode_locked = 0;
1279	}
1280bail_unlock_rw:
1281	if (size_change)
1282		ocfs2_rw_unlock(inode, 1);
1283bail:
1284
1285	/* Release quota pointers in case we acquired them */
1286	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1287		dqput(transfer_to[qtype]);
1288
1289	if (!status && attr->ia_valid & ATTR_MODE) {
1290		status = ocfs2_acl_chmod(inode, bh);
1291		if (status < 0)
1292			mlog_errno(status);
1293	}
1294	if (inode_locked)
1295		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1296
1297	brelse(bh);
1298	return status;
1299}
1300
1301int ocfs2_getattr(const struct path *path, struct kstat *stat,
1302		  u32 request_mask, unsigned int flags)
1303{
1304	struct inode *inode = d_inode(path->dentry);
1305	struct super_block *sb = path->dentry->d_sb;
1306	struct ocfs2_super *osb = sb->s_fs_info;
1307	int err;
1308
1309	err = ocfs2_inode_revalidate(path->dentry);
1310	if (err) {
1311		if (err != -ENOENT)
1312			mlog_errno(err);
1313		goto bail;
1314	}
1315
1316	generic_fillattr(inode, stat);
1317	/*
1318	 * If there is inline data in the inode, the inode will normally not
1319	 * have data blocks allocated (it may have an external xattr block).
1320	 * Report at least one sector for such files, so tools like tar, rsync,
1321	 * others don't incorrectly think the file is completely sparse.
1322	 */
1323	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1324		stat->blocks += (stat->size + 511)>>9;
1325
1326	/* We set the blksize from the cluster size for performance */
1327	stat->blksize = osb->s_clustersize;
1328
1329bail:
1330	return err;
1331}
1332
1333int ocfs2_permission(struct inode *inode, int mask)
1334{
1335	int ret, had_lock;
1336	struct ocfs2_lock_holder oh;
1337
1338	if (mask & MAY_NOT_BLOCK)
1339		return -ECHILD;
1340
1341	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1342	if (had_lock < 0) {
1343		ret = had_lock;
1344		goto out;
1345	} else if (had_lock) {
1346		/* See comments in ocfs2_setattr() for details.
1347		 * The call chain of this case could be:
1348		 * do_sys_open()
1349		 *  may_open()
1350		 *   inode_permission()
1351		 *    ocfs2_permission()
1352		 *     ocfs2_iop_get_acl()
1353		 */
1354		mlog(ML_ERROR, "Another case of recursive locking:\n");
1355		dump_stack();
1356	}
1357
1358	ret = generic_permission(inode, mask);
1359
1360	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1361out:
1362	return ret;
1363}
1364
1365static int __ocfs2_write_remove_suid(struct inode *inode,
1366				     struct buffer_head *bh)
1367{
1368	int ret;
1369	handle_t *handle;
1370	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1371	struct ocfs2_dinode *di;
1372
1373	trace_ocfs2_write_remove_suid(
1374			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1375			inode->i_mode);
1376
1377	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1378	if (IS_ERR(handle)) {
1379		ret = PTR_ERR(handle);
1380		mlog_errno(ret);
1381		goto out;
1382	}
1383
1384	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1385				      OCFS2_JOURNAL_ACCESS_WRITE);
1386	if (ret < 0) {
1387		mlog_errno(ret);
1388		goto out_trans;
1389	}
1390
1391	inode->i_mode &= ~S_ISUID;
1392	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1393		inode->i_mode &= ~S_ISGID;
1394
1395	di = (struct ocfs2_dinode *) bh->b_data;
1396	di->i_mode = cpu_to_le16(inode->i_mode);
1397	ocfs2_update_inode_fsync_trans(handle, inode, 0);
1398
1399	ocfs2_journal_dirty(handle, bh);
1400
1401out_trans:
1402	ocfs2_commit_trans(osb, handle);
1403out:
1404	return ret;
1405}
1406
1407static int ocfs2_write_remove_suid(struct inode *inode)
1408{
1409	int ret;
1410	struct buffer_head *bh = NULL;
1411
1412	ret = ocfs2_read_inode_block(inode, &bh);
1413	if (ret < 0) {
1414		mlog_errno(ret);
1415		goto out;
1416	}
1417
1418	ret =  __ocfs2_write_remove_suid(inode, bh);
1419out:
1420	brelse(bh);
1421	return ret;
1422}
1423
1424/*
1425 * Allocate enough extents to cover the region starting at byte offset
1426 * start for len bytes. Existing extents are skipped, any extents
1427 * added are marked as "unwritten".
1428 */
1429static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1430					    u64 start, u64 len)
1431{
1432	int ret;
1433	u32 cpos, phys_cpos, clusters, alloc_size;
1434	u64 end = start + len;
1435	struct buffer_head *di_bh = NULL;
1436
1437	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1438		ret = ocfs2_read_inode_block(inode, &di_bh);
1439		if (ret) {
1440			mlog_errno(ret);
1441			goto out;
1442		}
1443
1444		/*
1445		 * Nothing to do if the requested reservation range
1446		 * fits within the inode.
1447		 */
1448		if (ocfs2_size_fits_inline_data(di_bh, end))
1449			goto out;
1450
1451		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1452		if (ret) {
1453			mlog_errno(ret);
1454			goto out;
1455		}
1456	}
1457
1458	/*
1459	 * We consider both start and len to be inclusive.
1460	 */
1461	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1462	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1463	clusters -= cpos;
1464
1465	while (clusters) {
1466		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1467					 &alloc_size, NULL);
1468		if (ret) {
1469			mlog_errno(ret);
1470			goto out;
1471		}
1472
1473		/*
1474		 * Hole or existing extent len can be arbitrary, so
1475		 * cap it to our own allocation request.
1476		 */
1477		if (alloc_size > clusters)
1478			alloc_size = clusters;
1479
1480		if (phys_cpos) {
1481			/*
1482			 * We already have an allocation at this
1483			 * region so we can safely skip it.
1484			 */
1485			goto next;
1486		}
1487
1488		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1489		if (ret) {
1490			if (ret != -ENOSPC)
1491				mlog_errno(ret);
1492			goto out;
1493		}
1494
1495next:
1496		cpos += alloc_size;
1497		clusters -= alloc_size;
1498	}
1499
1500	ret = 0;
1501out:
1502
1503	brelse(di_bh);
1504	return ret;
1505}
1506
1507/*
1508 * Truncate a byte range, avoiding pages within partial clusters. This
1509 * preserves those pages for the zeroing code to write to.
1510 */
1511static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1512					 u64 byte_len)
1513{
1514	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1515	loff_t start, end;
1516	struct address_space *mapping = inode->i_mapping;
1517
1518	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1519	end = byte_start + byte_len;
1520	end = end & ~(osb->s_clustersize - 1);
1521
1522	if (start < end) {
1523		unmap_mapping_range(mapping, start, end - start, 0);
1524		truncate_inode_pages_range(mapping, start, end - 1);
1525	}
1526}
1527
1528static int ocfs2_zero_partial_clusters(struct inode *inode,
1529				       u64 start, u64 len)
1530{
1531	int ret = 0;
1532	u64 tmpend = 0;
1533	u64 end = start + len;
1534	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1535	unsigned int csize = osb->s_clustersize;
1536	handle_t *handle;
1537
1538	/*
1539	 * The "start" and "end" values are NOT necessarily part of
1540	 * the range whose allocation is being deleted. Rather, this
1541	 * is what the user passed in with the request. We must zero
1542	 * partial clusters here. There's no need to worry about
1543	 * physical allocation - the zeroing code knows to skip holes.
1544	 */
1545	trace_ocfs2_zero_partial_clusters(
1546		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1547		(unsigned long long)start, (unsigned long long)end);
1548
1549	/*
1550	 * If both edges are on a cluster boundary then there's no
1551	 * zeroing required as the region is part of the allocation to
1552	 * be truncated.
1553	 */
1554	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1555		goto out;
1556
 1556
1557	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1558	if (IS_ERR(handle)) {
1559		ret = PTR_ERR(handle);
1560		mlog_errno(ret);
1561		goto out;
1562	}
1563
1564	/*
1565	 * If start is on a cluster boundary and end is somewhere in another
1566	 * cluster, we have not COWed the cluster starting at start, unless
1567	 * end is also within the same cluster. So, in this case, we skip this
 1568	 * first call to ocfs2_zero_range_for_truncate() and move on
1569	 * to the next one.
1570	 */
1571	if ((start & (csize - 1)) != 0) {
1572		/*
1573		 * We want to get the byte offset of the end of the 1st
1574		 * cluster.
1575		 */
1576		tmpend = (u64)osb->s_clustersize +
1577			(start & ~(osb->s_clustersize - 1));
1578		if (tmpend > end)
1579			tmpend = end;
1580
1581		trace_ocfs2_zero_partial_clusters_range1(
1582			(unsigned long long)start,
1583			(unsigned long long)tmpend);
1584
1585		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1586						    tmpend);
1587		if (ret)
1588			mlog_errno(ret);
1589	}
1590
1591	if (tmpend < end) {
1592		/*
1593		 * This may make start and end equal, but the zeroing
1594		 * code will skip any work in that case so there's no
1595		 * need to catch it up here.
1596		 */
1597		start = end & ~(osb->s_clustersize - 1);
1598
1599		trace_ocfs2_zero_partial_clusters_range2(
1600			(unsigned long long)start, (unsigned long long)end);
1601
1602		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1603		if (ret)
1604			mlog_errno(ret);
1605	}
1606	ocfs2_update_inode_fsync_trans(handle, inode, 1);
1607
1608	ocfs2_commit_trans(osb, handle);
1609out:
1610	return ret;
1611}
1612
1613static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1614{
1615	int i;
1616	struct ocfs2_extent_rec *rec = NULL;
1617
1618	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1619
1620		rec = &el->l_recs[i];
1621
1622		if (le32_to_cpu(rec->e_cpos) < pos)
1623			break;
1624	}
1625
1626	return i;
1627}
1628
1629/*
1630 * Helper to calculate the punching pos and length in one run, we handle the
1631 * following three cases in order:
1632 *
1633 * - remove the entire record
1634 * - remove a partial record
1635 * - no record needs to be removed (hole-punching completed)
1636*/
1637static void ocfs2_calc_trunc_pos(struct inode *inode,
1638				 struct ocfs2_extent_list *el,
1639				 struct ocfs2_extent_rec *rec,
1640				 u32 trunc_start, u32 *trunc_cpos,
1641				 u32 *trunc_len, u32 *trunc_end,
1642				 u64 *blkno, int *done)
1643{
1644	int ret = 0;
1645	u32 coff, range;
1646
1647	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1648
1649	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1650		/*
1651		 * remove an entire extent record.
1652		 */
1653		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1654		/*
1655		 * Skip holes if any.
1656		 */
1657		if (range < *trunc_end)
1658			*trunc_end = range;
1659		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1660		*blkno = le64_to_cpu(rec->e_blkno);
1661		*trunc_end = le32_to_cpu(rec->e_cpos);
1662	} else if (range > trunc_start) {
1663		/*
1664		 * remove a partial extent record, which means we're
1665		 * removing the last extent record.
1666		 */
1667		*trunc_cpos = trunc_start;
1668		/*
1669		 * skip hole if any.
1670		 */
1671		if (range < *trunc_end)
1672			*trunc_end = range;
1673		*trunc_len = *trunc_end - trunc_start;
1674		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1675		*blkno = le64_to_cpu(rec->e_blkno) +
1676				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1677		*trunc_end = trunc_start;
1678	} else {
1679		/*
 1680		 * There are two possibilities here:
1681		 *
1682		 * - last record has been removed
1683		 * - trunc_start was within a hole
1684		 *
 1685		 * Both cases mean that hole punching is complete.
1686		 */
1687		ret = 1;
1688	}
1689
1690	*done = ret;
1691}
1692
1693int ocfs2_remove_inode_range(struct inode *inode,
1694			     struct buffer_head *di_bh, u64 byte_start,
1695			     u64 byte_len)
1696{
1697	int ret = 0, flags = 0, done = 0, i;
1698	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1699	u32 cluster_in_el;
1700	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1701	struct ocfs2_cached_dealloc_ctxt dealloc;
1702	struct address_space *mapping = inode->i_mapping;
1703	struct ocfs2_extent_tree et;
1704	struct ocfs2_path *path = NULL;
1705	struct ocfs2_extent_list *el = NULL;
1706	struct ocfs2_extent_rec *rec = NULL;
1707	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1708	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1709
1710	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1711	ocfs2_init_dealloc_ctxt(&dealloc);
1712
1713	trace_ocfs2_remove_inode_range(
1714			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1715			(unsigned long long)byte_start,
1716			(unsigned long long)byte_len);
1717
1718	if (byte_len == 0)
1719		return 0;
1720
1721	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1722		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1723					    byte_start + byte_len, 0);
1724		if (ret) {
1725			mlog_errno(ret);
1726			goto out;
1727		}
1728		/*
1729		 * There's no need to get fancy with the page cache
1730		 * truncate of an inline-data inode. We're talking
1731		 * about less than a page here, which will be cached
1732		 * in the dinode buffer anyway.
1733		 */
1734		unmap_mapping_range(mapping, 0, 0, 0);
1735		truncate_inode_pages(mapping, 0);
1736		goto out;
1737	}
1738
1739	/*
1740	 * For reflinks, we may need to CoW 2 clusters which might be
 1741	 * partially zeroed later, if the hole's start and end offsets fall
 1742	 * within one cluster (i.e. they are not exactly aligned to the cluster size).
1743	 */
1744
1745	if (ocfs2_is_refcount_inode(inode)) {
1746		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1747		if (ret) {
1748			mlog_errno(ret);
1749			goto out;
1750		}
1751
1752		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1753		if (ret) {
1754			mlog_errno(ret);
1755			goto out;
1756		}
1757	}
1758
1759	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1760	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1761	cluster_in_el = trunc_end;
1762
1763	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1764	if (ret) {
1765		mlog_errno(ret);
1766		goto out;
1767	}
1768
1769	path = ocfs2_new_path_from_et(&et);
1770	if (!path) {
1771		ret = -ENOMEM;
1772		mlog_errno(ret);
1773		goto out;
1774	}
1775
1776	while (trunc_end > trunc_start) {
1777
1778		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1779				      cluster_in_el);
1780		if (ret) {
1781			mlog_errno(ret);
1782			goto out;
1783		}
1784
1785		el = path_leaf_el(path);
1786
1787		i = ocfs2_find_rec(el, trunc_end);
1788		/*
1789		 * Need to go to previous extent block.
1790		 */
1791		if (i < 0) {
1792			if (path->p_tree_depth == 0)
1793				break;
1794
1795			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1796							    path,
1797							    &cluster_in_el);
1798			if (ret) {
1799				mlog_errno(ret);
1800				goto out;
1801			}
1802
1803			/*
1804			 * We've reached the leftmost extent block,
1805			 * it's safe to leave.
1806			 */
1807			if (cluster_in_el == 0)
1808				break;
1809
1810			/*
1811			 * The 'pos' searched for previous extent block is
1812			 * always one cluster less than actual trunc_end.
1813			 */
1814			trunc_end = cluster_in_el + 1;
1815
1816			ocfs2_reinit_path(path, 1);
1817
1818			continue;
1819
1820		} else
1821			rec = &el->l_recs[i];
1822
1823		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1824				     &trunc_len, &trunc_end, &blkno, &done);
1825		if (done)
1826			break;
1827
1828		flags = rec->e_flags;
1829		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1830
1831		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1832					       phys_cpos, trunc_len, flags,
1833					       &dealloc, refcount_loc, false);
1834		if (ret < 0) {
1835			mlog_errno(ret);
1836			goto out;
1837		}
1838
1839		cluster_in_el = trunc_end;
1840
1841		ocfs2_reinit_path(path, 1);
1842	}
1843
1844	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1845
1846out:
1847	ocfs2_free_path(path);
1848	ocfs2_schedule_truncate_log_flush(osb, 1);
1849	ocfs2_run_deallocs(osb, &dealloc);
1850
1851	return ret;
1852}
1853
1854/*
1855 * Parts of this function taken from xfs_change_file_space()
1856 */
1857static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1858				     loff_t f_pos, unsigned int cmd,
1859				     struct ocfs2_space_resv *sr,
1860				     int change_size)
1861{
1862	int ret;
1863	s64 llen;
1864	loff_t size;
1865	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1866	struct buffer_head *di_bh = NULL;
1867	handle_t *handle;
1868	unsigned long long max_off = inode->i_sb->s_maxbytes;
1869
1870	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1871		return -EROFS;
1872
1873	inode_lock(inode);
1874
1875	/*
1876	 * This prevents concurrent writes on other nodes
1877	 */
1878	ret = ocfs2_rw_lock(inode, 1);
1879	if (ret) {
1880		mlog_errno(ret);
1881		goto out;
1882	}
1883
1884	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1885	if (ret) {
1886		mlog_errno(ret);
1887		goto out_rw_unlock;
1888	}
1889
1890	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1891		ret = -EPERM;
1892		goto out_inode_unlock;
1893	}
1894
1895	switch (sr->l_whence) {
1896	case 0: /*SEEK_SET*/
1897		break;
1898	case 1: /*SEEK_CUR*/
1899		sr->l_start += f_pos;
1900		break;
1901	case 2: /*SEEK_END*/
1902		sr->l_start += i_size_read(inode);
1903		break;
1904	default:
1905		ret = -EINVAL;
1906		goto out_inode_unlock;
1907	}
1908	sr->l_whence = 0;
1909
1910	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1911
1912	if (sr->l_start < 0
1913	    || sr->l_start > max_off
1914	    || (sr->l_start + llen) < 0
1915	    || (sr->l_start + llen) > max_off) {
1916		ret = -EINVAL;
1917		goto out_inode_unlock;
1918	}
1919	size = sr->l_start + sr->l_len;
1920
1921	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
1922	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
1923		if (sr->l_len <= 0) {
1924			ret = -EINVAL;
1925			goto out_inode_unlock;
1926		}
1927	}
1928
1929	if (file && should_remove_suid(file->f_path.dentry)) {
1930		ret = __ocfs2_write_remove_suid(inode, di_bh);
1931		if (ret) {
1932			mlog_errno(ret);
1933			goto out_inode_unlock;
1934		}
1935	}
1936
1937	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1938	switch (cmd) {
1939	case OCFS2_IOC_RESVSP:
1940	case OCFS2_IOC_RESVSP64:
1941		/*
1942		 * This takes unsigned offsets, but the signed ones we
1943		 * pass have been checked against overflow above.
1944		 */
1945		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1946						       sr->l_len);
1947		break;
1948	case OCFS2_IOC_UNRESVSP:
1949	case OCFS2_IOC_UNRESVSP64:
1950		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1951					       sr->l_len);
1952		break;
1953	default:
1954		ret = -EINVAL;
1955	}
1956	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1957	if (ret) {
1958		mlog_errno(ret);
1959		goto out_inode_unlock;
1960	}
1961
1962	/*
1963	 * We update c/mtime for these changes
1964	 */
1965	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1966	if (IS_ERR(handle)) {
1967		ret = PTR_ERR(handle);
1968		mlog_errno(ret);
1969		goto out_inode_unlock;
1970	}
1971
1972	if (change_size && i_size_read(inode) < size)
1973		i_size_write(inode, size);
1974
1975	inode->i_ctime = inode->i_mtime = current_time(inode);
1976	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1977	if (ret < 0)
1978		mlog_errno(ret);
1979
1980	if (file && (file->f_flags & O_SYNC))
1981		handle->h_sync = 1;
1982
1983	ocfs2_commit_trans(osb, handle);
1984
1985out_inode_unlock:
1986	brelse(di_bh);
1987	ocfs2_inode_unlock(inode, 1);
1988out_rw_unlock:
1989	ocfs2_rw_unlock(inode, 1);
1990
1991out:
1992	inode_unlock(inode);
1993	return ret;
1994}
1995
1996int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1997			    struct ocfs2_space_resv *sr)
1998{
1999	struct inode *inode = file_inode(file);
2000	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2001	int ret;
2002
2003	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2004	    !ocfs2_writes_unwritten_extents(osb))
2005		return -ENOTTY;
2006	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2007		 !ocfs2_sparse_alloc(osb))
2008		return -ENOTTY;
2009
2010	if (!S_ISREG(inode->i_mode))
2011		return -EINVAL;
2012
2013	if (!(file->f_mode & FMODE_WRITE))
2014		return -EBADF;
2015
2016	ret = mnt_want_write_file(file);
2017	if (ret)
2018		return ret;
2019	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2020	mnt_drop_write_file(file);
2021	return ret;
2022}
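/*
 * For illustration only: userspace reaches ocfs2_change_file_space() through
 * the ocfs2 space reservation ioctls.  A minimal sketch, assuming the ioctl
 * numbers and struct ocfs2_space_resv are visible to the caller (for example
 * via a copy of ocfs2_fs.h).  l_whence = 0 means l_start is an absolute
 * offset, and l_len is the number of bytes to reserve as unwritten extents:
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,
 *		.l_start  = 0,
 *		.l_len    = 1 << 20,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_RESVSP64, &sr) < 0)
 *		perror("OCFS2_IOC_RESVSP64");
 *
 * OCFS2_IOC_UNRESVSP64 takes the same structure and releases the range again.
 */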
2023
2024static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2025			    loff_t len)
2026{
2027	struct inode *inode = file_inode(file);
2028	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2029	struct ocfs2_space_resv sr;
2030	int change_size = 1;
2031	int cmd = OCFS2_IOC_RESVSP64;
2032
2033	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2034		return -EOPNOTSUPP;
2035	if (!ocfs2_writes_unwritten_extents(osb))
2036		return -EOPNOTSUPP;
2037
2038	if (mode & FALLOC_FL_KEEP_SIZE)
2039		change_size = 0;
2040
2041	if (mode & FALLOC_FL_PUNCH_HOLE)
2042		cmd = OCFS2_IOC_UNRESVSP64;
2043
2044	sr.l_whence = 0;
2045	sr.l_start = (s64)offset;
2046	sr.l_len = (s64)len;
2047
2048	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2049					 change_size);
2050}
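/*
 * For illustration only: ocfs2_fallocate() is reached through the generic
 * fallocate(2) path.  A userspace sketch of the two supported modes:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * preallocates 16MB without changing i_size, while
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 * punches out the first 1MB (the VFS requires KEEP_SIZE together with
 * PUNCH_HOLE).  Any other mode bit is rejected above with -EOPNOTSUPP.
 */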
2051
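/*
 * Return 1 if any allocated extent in [pos, pos + count) is marked
 * OCFS2_EXT_REFCOUNTED, 0 if none are, or a negative errno if the extent
 * lookup fails.  The write path uses this to decide whether a CoW pass is
 * needed before the write can proceed.
 */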
2052int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2053				   size_t count)
2054{
2055	int ret = 0;
2056	unsigned int extent_flags;
2057	u32 cpos, clusters, extent_len, phys_cpos;
2058	struct super_block *sb = inode->i_sb;
2059
2060	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2061	    !ocfs2_is_refcount_inode(inode) ||
2062	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2063		return 0;
2064
2065	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2066	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2067
2068	while (clusters) {
2069		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2070					 &extent_flags);
2071		if (ret < 0) {
2072			mlog_errno(ret);
2073			goto out;
2074		}
2075
2076		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2077			ret = 1;
2078			break;
2079		}
2080
2081		if (extent_len > clusters)
2082			extent_len = clusters;
2083
2084		clusters -= extent_len;
2085		cpos += extent_len;
2086	}
2087out:
2088	return ret;
2089}
2090
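/*
 * Return nonzero when either end of the I/O range is not block aligned.
 * The write path below uses this to force unaligned async direct I/O to
 * complete synchronously.
 */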
2091static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2092{
2093	int blockmask = inode->i_sb->s_blocksize - 1;
2094	loff_t final_size = pos + count;
2095
2096	if ((pos & blockmask) || (final_size & blockmask))
2097		return 1;
2098	return 0;
2099}
2100
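/*
 * Take the cluster inode lock at @meta_level and then ip_alloc_sem (shared
 * or exclusive depending on @write_sem).  When @wait is zero, both locks are
 * only tried and -EAGAIN is returned if either cannot be taken immediately.
 */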
2101static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2102					    struct buffer_head **di_bh,
2103					    int meta_level,
2104					    int write_sem,
2105					    int wait)
2106{
2107	int ret = 0;
2108
2109	if (wait)
2110		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
2111	else
2112		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
2113	if (ret < 0)
2114		goto out;
2115
2116	if (wait) {
2117		if (write_sem)
2118			down_write(&OCFS2_I(inode)->ip_alloc_sem);
2119		else
2120			down_read(&OCFS2_I(inode)->ip_alloc_sem);
2121	} else {
2122		if (write_sem)
2123			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2124		else
2125			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2126
2127		if (!ret) {
2128			ret = -EAGAIN;
2129			goto out_unlock;
2130		}
2131	}
2132
2133	return ret;
2134
2135out_unlock:
2136	brelse(*di_bh);
2137	*di_bh = NULL;
2138	ocfs2_inode_unlock(inode, meta_level);
2139out:
2140	return ret;
2141}
2142
2143static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2144					       struct buffer_head **di_bh,
2145					       int meta_level,
2146					       int write_sem)
2147{
2148	if (write_sem)
2149		up_write(&OCFS2_I(inode)->ip_alloc_sem);
2150	else
2151		up_read(&OCFS2_I(inode)->ip_alloc_sem);
2152
2153	brelse(*di_bh);
2154	*di_bh = NULL;
2155
2156	if (meta_level >= 0)
2157		ocfs2_inode_unlock(inode, meta_level);
2158}
2159
2160static int ocfs2_prepare_inode_for_write(struct file *file,
2161					 loff_t pos, size_t count, int wait)
2162{
2163	int ret = 0, meta_level = 0, overwrite_io = 0;
2164	int write_sem = 0;
2165	struct dentry *dentry = file->f_path.dentry;
2166	struct inode *inode = d_inode(dentry);
2167	struct buffer_head *di_bh = NULL;
2168	u32 cpos;
2169	u32 clusters;
2170
2171	/*
2172	 * We start with a read level meta lock and only jump to an ex
2173	 * if we need to make modifications here.
2174	 */
2175	for(;;) {
2176		ret = ocfs2_inode_lock_for_extent_tree(inode,
2177						       &di_bh,
2178						       meta_level,
2179						       write_sem,
2180						       wait);
2181		if (ret < 0) {
2182			if (ret != -EAGAIN)
2183				mlog_errno(ret);
2184			goto out;
2185		}
2186
2187		/*
2188		 * Check whether the I/O will overwrite allocated blocks when
2189		 * the IOCB_NOWAIT flag is set.
2190		 */
2191		if (!wait && !overwrite_io) {
2192			overwrite_io = 1;
2193
2194			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2195			if (ret < 0) {
2196				if (ret != -EAGAIN)
2197					mlog_errno(ret);
2198				goto out_unlock;
2199			}
2200		}
2201
2202		/* Clear suid / sgid if necessary. We do this here
2203		 * instead of later in the write path because
2204		 * remove_suid() calls ->setattr without any hint that
2205		 * we may have already done our cluster locking. Since
2206		 * ocfs2_setattr() *must* take cluster locks to
2207		 * proceed, this will lead us to recursively lock the
2208		 * inode. There's also the dinode i_size state which
2209		 * can be lost via setattr during extending writes (we
2210		 * set inode->i_size at the end of a write). */
2211		if (should_remove_suid(dentry)) {
2212			if (meta_level == 0) {
2213				ocfs2_inode_unlock_for_extent_tree(inode,
2214								   &di_bh,
2215								   meta_level,
2216								   write_sem);
2217				meta_level = 1;
2218				continue;
2219			}
2220
2221			ret = ocfs2_write_remove_suid(inode);
2222			if (ret < 0) {
2223				mlog_errno(ret);
2224				goto out_unlock;
2225			}
2226		}
2227
2228		ret = ocfs2_check_range_for_refcount(inode, pos, count);
2229		if (ret == 1) {
2230			ocfs2_inode_unlock_for_extent_tree(inode,
2231							   &di_bh,
2232							   meta_level,
2233							   write_sem);
2234			meta_level = 1;
2235			write_sem = 1;
2236			ret = ocfs2_inode_lock_for_extent_tree(inode,
2237							       &di_bh,
2238							       meta_level,
2239							       write_sem,
2240							       wait);
2241			if (ret < 0) {
2242				if (ret != -EAGAIN)
2243					mlog_errno(ret);
2244				goto out;
2245			}
2246
2247			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2248			clusters =
2249				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2250			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2251		}
2252
2253		if (ret < 0) {
2254			if (ret != -EAGAIN)
2255				mlog_errno(ret);
2256			goto out_unlock;
2257		}
2258
2259		break;
2260	}
2261
2262out_unlock:
2263	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2264					    pos, count, wait);
2265
2266	ocfs2_inode_unlock_for_extent_tree(inode,
2267					   &di_bh,
2268					   meta_level,
2269					   write_sem);
2270
2271out:
2272	return ret;
2273}
2274
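/*
 * Write side of the file I/O path.  Locks are taken in the order i_rwsem,
 * then the ocfs2 rw lock, then (for O_DIRECT with full coherency) a brief
 * EX cluster inode lock; IOCB_NOWAIT callers use the trylock variants and
 * return -EAGAIN rather than sleeping on any of them.
 */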
2275static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2276				    struct iov_iter *from)
2277{
2278	int rw_level;
2279	ssize_t written = 0;
2280	ssize_t ret;
2281	size_t count = iov_iter_count(from);
2282	struct file *file = iocb->ki_filp;
2283	struct inode *inode = file_inode(file);
2284	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2285	int full_coherency = !(osb->s_mount_opt &
2286			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2287	void *saved_ki_complete = NULL;
2288	int append_write = ((iocb->ki_pos + count) >=
2289			i_size_read(inode) ? 1 : 0);
2290	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2291	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2292
2293	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2294		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2295		file->f_path.dentry->d_name.len,
2296		file->f_path.dentry->d_name.name,
2297		(unsigned int)from->nr_segs);	/* GRRRRR */
2298
2299	if (!direct_io && nowait)
2300		return -EOPNOTSUPP;
2301
2302	if (count == 0)
2303		return 0;
2304
2305	if (nowait) {
2306		if (!inode_trylock(inode))
2307			return -EAGAIN;
2308	} else
2309		inode_lock(inode);
2310
2311	/*
2312	 * Concurrent O_DIRECT writes are allowed with
2313	 * the mount option "coherency=buffered".
2314	 * For append writes, we must take rw EX.
2315	 */
2316	rw_level = (!direct_io || full_coherency || append_write);
2317
2318	if (nowait)
2319		ret = ocfs2_try_rw_lock(inode, rw_level);
2320	else
2321		ret = ocfs2_rw_lock(inode, rw_level);
2322	if (ret < 0) {
2323		if (ret != -EAGAIN)
2324			mlog_errno(ret);
2325		goto out_mutex;
2326	}
2327
2328	/*
2329	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2330	 * inode_lock to guarantee coherency.
2331	 */
2332	if (direct_io && full_coherency) {
2333		/*
2334		 * We need to take and drop the inode lock to force
2335		 * other nodes to drop their caches.  Buffered I/O
2336		 * already does this in write_begin().
2337		 */
2338		if (nowait)
2339			ret = ocfs2_try_inode_lock(inode, NULL, 1);
2340		else
2341			ret = ocfs2_inode_lock(inode, NULL, 1);
2342		if (ret < 0) {
2343			if (ret != -EAGAIN)
2344				mlog_errno(ret);
2345			goto out;
2346		}
2347
2348		ocfs2_inode_unlock(inode, 1);
2349	}
2350
2351	ret = generic_write_checks(iocb, from);
2352	if (ret <= 0) {
2353		if (ret)
2354			mlog_errno(ret);
2355		goto out;
2356	}
2357	count = ret;
2358
2359	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2360	if (ret < 0) {
2361		if (ret != -EAGAIN)
2362			mlog_errno(ret);
2363		goto out;
2364	}
2365
2366	if (direct_io && !is_sync_kiocb(iocb) &&
2367	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2368		/*
2369		 * Make it a sync io if it's an unaligned aio.
2370		 */
2371		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2372	}
2373
2374	/* communicate with ocfs2_dio_end_io */
2375	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2376
2377	written = __generic_file_write_iter(iocb, from);
2378	/* buffered aio wouldn't have proper lock coverage today */
2379	BUG_ON(written == -EIOCBQUEUED && !direct_io);
2380
2381	/*
2382	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2383	 * function pointer which is called when o_direct io completes so that
2384	 * it can unlock our rw lock.
2385	 * Unfortunately there are error cases which call end_io and others
2386	 * that don't, so we don't have to unlock the rw_lock if either an
2387	 * async dio is going to do it in the future or an end_io after an
2388	 * error has already done it.
2389	 */
2390	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2391		rw_level = -1;
2392	}
2393
2394	if (unlikely(written <= 0))
2395		goto out;
2396
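	/*
	 * Buffered O_DSYNC (or IS_SYNC) writes are made stable by hand:
	 * push the written range to disk, force a journal commit so the
	 * metadata changes are durable, then wait for the data writeback.
	 */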
2397	if (((file->f_flags & O_DSYNC) && !direct_io) ||
2398	    IS_SYNC(inode)) {
2399		ret = filemap_fdatawrite_range(file->f_mapping,
2400					       iocb->ki_pos - written,
2401					       iocb->ki_pos - 1);
2402		if (ret < 0)
2403			written = ret;
2404
2405		if (!ret) {
2406			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2407			if (ret < 0)
2408				written = ret;
2409		}
2410
2411		if (!ret)
2412			ret = filemap_fdatawait_range(file->f_mapping,
2413						      iocb->ki_pos - written,
2414						      iocb->ki_pos - 1);
2415	}
2416
2417out:
2418	if (saved_ki_complete)
2419		xchg(&iocb->ki_complete, saved_ki_complete);
2420
2421	if (rw_level != -1)
2422		ocfs2_rw_unlock(inode, rw_level);
2423
2424out_mutex:
2425	inode_unlock(inode);
2426
2427	if (written)
2428		ret = written;
2429	return ret;
2430}
2431
2432static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2433				   struct iov_iter *to)
2434{
2435	int ret = 0, rw_level = -1, lock_level = 0;
2436	struct file *filp = iocb->ki_filp;
2437	struct inode *inode = file_inode(filp);
2438	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2439	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2440
2441	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2442			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2443			filp->f_path.dentry->d_name.len,
2444			filp->f_path.dentry->d_name.name,
2445			to->nr_segs);	/* GRRRRR */
2446
2447
2448	if (!inode) {
2449		ret = -EINVAL;
2450		mlog_errno(ret);
2451		goto bail;
2452	}
2453
2454	if (!direct_io && nowait)
2455		return -EOPNOTSUPP;
2456
2457	/*
2458	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
2459	 * need locks to protect pending reads from racing with truncate.
2460	 */
2461	if (direct_io) {
2462		if (nowait)
2463			ret = ocfs2_try_rw_lock(inode, 0);
2464		else
2465			ret = ocfs2_rw_lock(inode, 0);
2466
2467		if (ret < 0) {
2468			if (ret != -EAGAIN)
2469				mlog_errno(ret);
2470			goto bail;
2471		}
2472		rw_level = 0;
2473		/* communicate with ocfs2_dio_end_io */
2474		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2475	}
2476
2477	/*
2478	 * We're fine letting folks race truncates and extending
2479	 * writes with read across the cluster, just like they can
2480	 * locally. Hence no rw_lock during read.
2481	 *
2482	 * Take and drop the meta data lock to update inode fields
2483	 * like i_size. This allows the checks down below
2484	 * generic_file_read_iter() a chance of actually working.
2485	 */
2486	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2487				     !nowait);
2488	if (ret < 0) {
2489		if (ret != -EAGAIN)
2490			mlog_errno(ret);
2491		goto bail;
2492	}
2493	ocfs2_inode_unlock(inode, lock_level);
2494
2495	ret = generic_file_read_iter(iocb, to);
2496	trace_generic_file_read_iter_ret(ret);
2497
2498	/* buffered aio wouldn't have proper lock coverage today */
2499	BUG_ON(ret == -EIOCBQUEUED && !direct_io);
2500
2501	/* see ocfs2_file_write_iter */
2502	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2503		rw_level = -1;
2504	}
2505
2506bail:
2507	if (rw_level != -1)
2508		ocfs2_rw_unlock(inode, rw_level);
2509
2510	return ret;
2511}
2512
2513/* Refer to generic_file_llseek_unlocked() */
2514static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2515{
2516	struct inode *inode = file->f_mapping->host;
2517	int ret = 0;
2518
2519	inode_lock(inode);
2520
2521	switch (whence) {
2522	case SEEK_SET:
2523		break;
2524	case SEEK_END:
2525		/* SEEK_END requires the OCFS2 inode lock for the file
2526		 * because it references the file's size.
2527		 */
2528		ret = ocfs2_inode_lock(inode, NULL, 0);
2529		if (ret < 0) {
2530			mlog_errno(ret);
2531			goto out;
2532		}
2533		offset += i_size_read(inode);
2534		ocfs2_inode_unlock(inode, 0);
2535		break;
2536	case SEEK_CUR:
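		/*
		 * lseek(fd, 0, SEEK_CUR) is a pure position query, so return
		 * f_pos directly without going through vfs_setpos().
		 */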
2537		if (offset == 0) {
2538			offset = file->f_pos;
2539			goto out;
2540		}
2541		offset += file->f_pos;
2542		break;
2543	case SEEK_DATA:
2544	case SEEK_HOLE:
2545		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2546		if (ret)
2547			goto out;
2548		break;
2549	default:
2550		ret = -EINVAL;
2551		goto out;
2552	}
2553
2554	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2555
2556out:
2557	inode_unlock(inode);
2558	if (ret)
2559		return ret;
2560	return offset;
2561}
2562
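/*
 * Shared backend for the clone/reflink and dedupe ioctls.  Both inodes are
 * cluster locked for the whole operation, the destination's page cache is
 * invalidated over the target range, and both extent maps are emptied at the
 * end so stale cached extents are not used after the remap.
 */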
2563static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
2564				     struct file *file_out, loff_t pos_out,
2565				     loff_t len, unsigned int remap_flags)
2566{
2567	struct inode *inode_in = file_inode(file_in);
2568	struct inode *inode_out = file_inode(file_out);
2569	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
2570	struct buffer_head *in_bh = NULL, *out_bh = NULL;
2571	bool same_inode = (inode_in == inode_out);
2572	loff_t remapped = 0;
2573	ssize_t ret;
2574
2575	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2576		return -EINVAL;
2577	if (!ocfs2_refcount_tree(osb))
2578		return -EOPNOTSUPP;
2579	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
2580		return -EROFS;
2581
2582	/* Lock both files against IO */
2583	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
2584	if (ret)
2585		return ret;
2586
2587	/* Check file eligibility and prepare for block sharing. */
2588	ret = -EINVAL;
2589	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
2590	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
2591		goto out_unlock;
2592
2593	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
2594			&len, remap_flags);
2595	if (ret < 0 || len == 0)
2596		goto out_unlock;
2597
2598	/* Lock out changes to the allocation maps and remap. */
2599	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2600	if (!same_inode)
2601		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
2602				  SINGLE_DEPTH_NESTING);
2603
2604	/* Zap any page cache for the destination file's range. */
2605	truncate_inode_pages_range(&inode_out->i_data,
2606				   round_down(pos_out, PAGE_SIZE),
2607				   round_up(pos_out + len, PAGE_SIZE) - 1);
2608
2609	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
2610			inode_out, out_bh, pos_out, len);
2611	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2612	if (!same_inode)
2613		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
2614	if (remapped < 0) {
2615		ret = remapped;
2616		mlog_errno(ret);
2617		goto out_unlock;
2618	}
2619
2620	/*
2621	 * Empty the extent map so that we may get the right extent
2622	 * record from the disk.
2623	 */
2624	ocfs2_extent_map_trunc(inode_in, 0);
2625	ocfs2_extent_map_trunc(inode_out, 0);
2626
2627	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
2628	if (ret) {
2629		mlog_errno(ret);
2630		goto out_unlock;
2631	}
2632
2633out_unlock:
2634	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
2635	return remapped > 0 ? remapped : ret;
2636}
2637
2638const struct inode_operations ocfs2_file_iops = {
2639	.setattr	= ocfs2_setattr,
2640	.getattr	= ocfs2_getattr,
2641	.permission	= ocfs2_permission,
2642	.listxattr	= ocfs2_listxattr,
2643	.fiemap		= ocfs2_fiemap,
2644	.get_acl	= ocfs2_iop_get_acl,
2645	.set_acl	= ocfs2_iop_set_acl,
2646};
2647
2648const struct inode_operations ocfs2_special_file_iops = {
2649	.setattr	= ocfs2_setattr,
2650	.getattr	= ocfs2_getattr,
2651	.permission	= ocfs2_permission,
2652	.get_acl	= ocfs2_iop_get_acl,
2653	.set_acl	= ocfs2_iop_set_acl,
2654};
2655
2656/*
2657 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2658 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2659 */
2660const struct file_operations ocfs2_fops = {
2661	.llseek		= ocfs2_file_llseek,
2662	.mmap		= ocfs2_mmap,
2663	.fsync		= ocfs2_sync_file,
2664	.release	= ocfs2_file_release,
2665	.open		= ocfs2_file_open,
2666	.read_iter	= ocfs2_file_read_iter,
2667	.write_iter	= ocfs2_file_write_iter,
2668	.unlocked_ioctl	= ocfs2_ioctl,
2669#ifdef CONFIG_COMPAT
2670	.compat_ioctl   = ocfs2_compat_ioctl,
2671#endif
2672	.lock		= ocfs2_lock,
2673	.flock		= ocfs2_flock,
2674	.splice_read	= generic_file_splice_read,
2675	.splice_write	= iter_file_splice_write,
2676	.fallocate	= ocfs2_fallocate,
2677	.remap_file_range = ocfs2_remap_file_range,
2678};
2679
2680const struct file_operations ocfs2_dops = {
2681	.llseek		= generic_file_llseek,
2682	.read		= generic_read_dir,
2683	.iterate	= ocfs2_readdir,
2684	.fsync		= ocfs2_sync_file,
2685	.release	= ocfs2_dir_release,
2686	.open		= ocfs2_dir_open,
2687	.unlocked_ioctl	= ocfs2_ioctl,
2688#ifdef CONFIG_COMPAT
2689	.compat_ioctl   = ocfs2_compat_ioctl,
2690#endif
2691	.lock		= ocfs2_lock,
2692	.flock		= ocfs2_flock,
2693};
2694
2695/*
2696 * POSIX-lockless variants of our file_operations.
2697 *
2698 * These will be used if the underlying cluster stack does not support
2699 * posix file locking, if the user passes the "localflocks" mount
2700 * option, or if we have a local-only fs.
2701 *
2702 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2703 * so we still want it in the case of no stack support for
2704 * plocks. Internally, it will do the right thing when asked to ignore
2705 * the cluster.
2706 */
2707const struct file_operations ocfs2_fops_no_plocks = {
2708	.llseek		= ocfs2_file_llseek,
2709	.mmap		= ocfs2_mmap,
2710	.fsync		= ocfs2_sync_file,
2711	.release	= ocfs2_file_release,
2712	.open		= ocfs2_file_open,
2713	.read_iter	= ocfs2_file_read_iter,
2714	.write_iter	= ocfs2_file_write_iter,
2715	.unlocked_ioctl	= ocfs2_ioctl,
2716#ifdef CONFIG_COMPAT
2717	.compat_ioctl   = ocfs2_compat_ioctl,
2718#endif
2719	.flock		= ocfs2_flock,
2720	.splice_read	= generic_file_splice_read,
2721	.splice_write	= iter_file_splice_write,
2722	.fallocate	= ocfs2_fallocate,
2723	.remap_file_range = ocfs2_remap_file_range,
2724};
2725
2726const struct file_operations ocfs2_dops_no_plocks = {
2727	.llseek		= generic_file_llseek,
2728	.read		= generic_read_dir,
2729	.iterate	= ocfs2_readdir,
2730	.fsync		= ocfs2_sync_file,
2731	.release	= ocfs2_dir_release,
2732	.open		= ocfs2_dir_open,
2733	.unlocked_ioctl	= ocfs2_ioctl,
2734#ifdef CONFIG_COMPAT
2735	.compat_ioctl   = ocfs2_compat_ioctl,
2736#endif
2737	.flock		= ocfs2_flock,
2738};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * file.c
   4 *
   5 * File open, close, extend, truncate
   6 *
   7 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/fs.h>
  12#include <linux/types.h>
  13#include <linux/slab.h>
  14#include <linux/highmem.h>
  15#include <linux/pagemap.h>
  16#include <linux/uio.h>
  17#include <linux/sched.h>
  18#include <linux/splice.h>
  19#include <linux/mount.h>
  20#include <linux/writeback.h>
  21#include <linux/falloc.h>
  22#include <linux/quotaops.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25
  26#include <cluster/masklog.h>
  27
  28#include "ocfs2.h"
  29
  30#include "alloc.h"
  31#include "aops.h"
  32#include "dir.h"
  33#include "dlmglue.h"
  34#include "extent_map.h"
  35#include "file.h"
  36#include "sysfile.h"
  37#include "inode.h"
  38#include "ioctl.h"
  39#include "journal.h"
  40#include "locks.h"
  41#include "mmap.h"
  42#include "suballoc.h"
  43#include "super.h"
  44#include "xattr.h"
  45#include "acl.h"
  46#include "quota.h"
  47#include "refcounttree.h"
  48#include "ocfs2_trace.h"
  49
  50#include "buffer_head_io.h"
  51
  52static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  53{
  54	struct ocfs2_file_private *fp;
  55
  56	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  57	if (!fp)
  58		return -ENOMEM;
  59
  60	fp->fp_file = file;
  61	mutex_init(&fp->fp_mutex);
  62	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  63	file->private_data = fp;
  64
  65	return 0;
  66}
  67
  68static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  69{
  70	struct ocfs2_file_private *fp = file->private_data;
  71	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  72
  73	if (fp) {
  74		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  75		ocfs2_lock_res_free(&fp->fp_flock);
  76		kfree(fp);
  77		file->private_data = NULL;
  78	}
  79}
  80
  81static int ocfs2_file_open(struct inode *inode, struct file *file)
  82{
  83	int status;
  84	int mode = file->f_flags;
  85	struct ocfs2_inode_info *oi = OCFS2_I(inode);
  86
  87	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
  88			      (unsigned long long)oi->ip_blkno,
  89			      file->f_path.dentry->d_name.len,
  90			      file->f_path.dentry->d_name.name, mode);
  91
  92	if (file->f_mode & FMODE_WRITE) {
  93		status = dquot_initialize(inode);
  94		if (status)
  95			goto leave;
  96	}
  97
  98	spin_lock(&oi->ip_lock);
  99
 100	/* Check that the inode hasn't been wiped from disk by another
 101	 * node. If it hasn't then we're safe as long as we hold the
 102	 * spin lock until our increment of open count. */
 103	if (oi->ip_flags & OCFS2_INODE_DELETED) {
 104		spin_unlock(&oi->ip_lock);
 105
 106		status = -ENOENT;
 107		goto leave;
 108	}
 109
 110	if (mode & O_DIRECT)
 111		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 112
 113	oi->ip_open_count++;
 114	spin_unlock(&oi->ip_lock);
 115
 116	status = ocfs2_init_file_private(inode, file);
 117	if (status) {
 118		/*
 119		 * We want to set open count back if we're failing the
 120		 * open.
 121		 */
 122		spin_lock(&oi->ip_lock);
 123		oi->ip_open_count--;
 124		spin_unlock(&oi->ip_lock);
 125	}
 126
 127	file->f_mode |= FMODE_NOWAIT;
 128
 129leave:
 130	return status;
 131}
 132
 133static int ocfs2_file_release(struct inode *inode, struct file *file)
 134{
 135	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 136
 137	spin_lock(&oi->ip_lock);
 138	if (!--oi->ip_open_count)
 139		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 140
 141	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 142				 oi->ip_blkno,
 143				 file->f_path.dentry->d_name.len,
 144				 file->f_path.dentry->d_name.name,
 145				 oi->ip_open_count);
 146	spin_unlock(&oi->ip_lock);
 147
 148	ocfs2_free_file_private(inode, file);
 149
 150	return 0;
 151}
 152
 153static int ocfs2_dir_open(struct inode *inode, struct file *file)
 154{
 155	return ocfs2_init_file_private(inode, file);
 156}
 157
 158static int ocfs2_dir_release(struct inode *inode, struct file *file)
 159{
 160	ocfs2_free_file_private(inode, file);
 161	return 0;
 162}
 163
 164static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 165			   int datasync)
 166{
 167	int err = 0;
 168	struct inode *inode = file->f_mapping->host;
 169	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 170	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 171	journal_t *journal = osb->journal->j_journal;
 172	int ret;
 173	tid_t commit_tid;
 174	bool needs_barrier = false;
 175
 176	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 177			      oi->ip_blkno,
 178			      file->f_path.dentry->d_name.len,
 179			      file->f_path.dentry->d_name.name,
 180			      (unsigned long long)datasync);
 181
 182	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 183		return -EROFS;
 184
 185	err = file_write_and_wait_range(file, start, end);
 186	if (err)
 187		return err;
 188
 189	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
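	/*
	 * If the committing transaction will not issue a cache flush on its
	 * own (or has already committed), flush the block device here so the
	 * data written above is actually durable.
	 */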
 190	if (journal->j_flags & JBD2_BARRIER &&
 191	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 192		needs_barrier = true;
 193	err = jbd2_complete_transaction(journal, commit_tid);
 194	if (needs_barrier) {
 195		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
 196		if (!err)
 197			err = ret;
 198	}
 199
 200	if (err)
 201		mlog_errno(err);
 202
 203	return (err < 0) ? -EIO : 0;
 204}
 205
 206int ocfs2_should_update_atime(struct inode *inode,
 207			      struct vfsmount *vfsmnt)
 208{
 209	struct timespec64 now;
 210	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 211
 212	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 213		return 0;
 214
 215	if ((inode->i_flags & S_NOATIME) ||
 216	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
 217		return 0;
 218
 219	/*
 220	 * We can be called with no vfsmnt structure - NFSD will
 221	 * sometimes do this.
 222	 *
 223	 * Note that our action here is different than touch_atime() -
 224	 * if we can't tell whether this is a noatime mount, then we
 225	 * don't know whether to trust the value of s_atime_quantum.
 226	 */
 227	if (vfsmnt == NULL)
 228		return 0;
 229
 230	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 231	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 232		return 0;
 233
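	/*
	 * relatime: only update atime when it is no newer than mtime or
	 * ctime; otherwise leave it alone.
	 */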
 234	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 235		struct timespec64 ctime = inode_get_ctime(inode);
 236		struct timespec64 atime = inode_get_atime(inode);
 237		struct timespec64 mtime = inode_get_mtime(inode);
 238
 239		if ((timespec64_compare(&atime, &mtime) <= 0) ||
 240		    (timespec64_compare(&atime, &ctime) <= 0))
 241			return 1;
 242
 243		return 0;
 244	}
 245
 246	now = current_time(inode);
 247	if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
 248		return 0;
 249	else
 250		return 1;
 251}
 252
 253int ocfs2_update_inode_atime(struct inode *inode,
 254			     struct buffer_head *bh)
 255{
 256	int ret;
 257	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 258	handle_t *handle;
 259	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 260
 261	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 262	if (IS_ERR(handle)) {
 263		ret = PTR_ERR(handle);
 264		mlog_errno(ret);
 265		goto out;
 266	}
 267
 268	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 269				      OCFS2_JOURNAL_ACCESS_WRITE);
 270	if (ret) {
 271		mlog_errno(ret);
 272		goto out_commit;
 273	}
 274
 275	/*
 276	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 277	 * have i_rwsem to guard against concurrent changes to other
 278	 * inode fields.
 279	 */
 280	inode_set_atime_to_ts(inode, current_time(inode));
 281	di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
 282	di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
 283	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 284	ocfs2_journal_dirty(handle, bh);
 285
 286out_commit:
 287	ocfs2_commit_trans(osb, handle);
 288out:
 289	return ret;
 290}
 291
 292int ocfs2_set_inode_size(handle_t *handle,
 293				struct inode *inode,
 294				struct buffer_head *fe_bh,
 295				u64 new_i_size)
 296{
 297	int status;
 298
 299	i_size_write(inode, new_i_size);
 300	inode->i_blocks = ocfs2_inode_sector_count(inode);
 301	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 302
 303	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 304	if (status < 0) {
 305		mlog_errno(status);
 306		goto bail;
 307	}
 308
 309bail:
 310	return status;
 311}
 312
 313int ocfs2_simple_size_update(struct inode *inode,
 314			     struct buffer_head *di_bh,
 315			     u64 new_i_size)
 316{
 317	int ret;
 318	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 319	handle_t *handle = NULL;
 320
 321	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 322	if (IS_ERR(handle)) {
 323		ret = PTR_ERR(handle);
 324		mlog_errno(ret);
 325		goto out;
 326	}
 327
 328	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 329				   new_i_size);
 330	if (ret < 0)
 331		mlog_errno(ret);
 332
 333	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 334	ocfs2_commit_trans(osb, handle);
 335out:
 336	return ret;
 337}
 338
 339static int ocfs2_cow_file_pos(struct inode *inode,
 340			      struct buffer_head *fe_bh,
 341			      u64 offset)
 342{
 343	int status;
 344	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 345	unsigned int num_clusters = 0;
 346	unsigned int ext_flags = 0;
 347
 348	/*
 349	 * If the new offset is aligned to a cluster boundary, there is
 350	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 351	 * CoW either.
 352	 */
 353	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 354		return 0;
 355
 356	status = ocfs2_get_clusters(inode, cpos, &phys,
 357				    &num_clusters, &ext_flags);
 358	if (status) {
 359		mlog_errno(status);
 360		goto out;
 361	}
 362
 363	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 364		goto out;
 365
 366	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 367
 368out:
 369	return status;
 370}
 371
 372static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 373				     struct inode *inode,
 374				     struct buffer_head *fe_bh,
 375				     u64 new_i_size)
 376{
 377	int status;
 378	handle_t *handle;
 379	struct ocfs2_dinode *di;
 380	u64 cluster_bytes;
 381
 382	/*
 383	 * We need to CoW the cluster that contains the offset if it is
 384	 * reflinked, since we will call ocfs2_zero_range_for_truncate later,
 385	 * which will write "0" from the offset to the end of the cluster.
 386	 */
 387	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 388	if (status) {
 389		mlog_errno(status);
 390		return status;
 391	}
 392
 393	/* TODO: This needs to actually orphan the inode in this
 394	 * transaction. */
 395
 396	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 397	if (IS_ERR(handle)) {
 398		status = PTR_ERR(handle);
 399		mlog_errno(status);
 400		goto out;
 401	}
 402
 403	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 404					 OCFS2_JOURNAL_ACCESS_WRITE);
 405	if (status < 0) {
 406		mlog_errno(status);
 407		goto out_commit;
 408	}
 409
 410	/*
 411	 * Do this before setting i_size.
 412	 */
 413	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 414	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 415					       cluster_bytes);
 416	if (status) {
 417		mlog_errno(status);
 418		goto out_commit;
 419	}
 420
 421	i_size_write(inode, new_i_size);
 422	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 423
 424	di = (struct ocfs2_dinode *) fe_bh->b_data;
 425	di->i_size = cpu_to_le64(new_i_size);
 426	di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
 427	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
 428	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 429
 430	ocfs2_journal_dirty(handle, fe_bh);
 431
 432out_commit:
 433	ocfs2_commit_trans(osb, handle);
 434out:
 435	return status;
 436}
 437
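/*
 * Shrink a file to new_i_size.  Growing is rejected here with -EINVAL;
 * extending the file is handled separately by ocfs2_extend_file() on the
 * setattr path.
 */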
 438int ocfs2_truncate_file(struct inode *inode,
 439			       struct buffer_head *di_bh,
 440			       u64 new_i_size)
 441{
 442	int status = 0;
 443	struct ocfs2_dinode *fe = NULL;
 444	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 445
 446	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 447	 * already validated it */
 448	fe = (struct ocfs2_dinode *) di_bh->b_data;
 449
 450	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 451				  (unsigned long long)le64_to_cpu(fe->i_size),
 452				  (unsigned long long)new_i_size);
 453
 454	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 455			"Inode %llu, inode i_size = %lld != di "
 456			"i_size = %llu, i_flags = 0x%x\n",
 457			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 458			i_size_read(inode),
 459			(unsigned long long)le64_to_cpu(fe->i_size),
 460			le32_to_cpu(fe->i_flags));
 461
 462	if (new_i_size > le64_to_cpu(fe->i_size)) {
 463		trace_ocfs2_truncate_file_error(
 464			(unsigned long long)le64_to_cpu(fe->i_size),
 465			(unsigned long long)new_i_size);
 466		status = -EINVAL;
 467		mlog_errno(status);
 468		goto bail;
 469	}
 470
 471	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 472
 473	ocfs2_resv_discard(&osb->osb_la_resmap,
 474			   &OCFS2_I(inode)->ip_la_data_resv);
 475
 476	/*
 477	 * The inode lock forced other nodes to sync and drop their
 478	 * pages, which (correctly) happens even if we have a truncate
 479	 * without allocation change - ocfs2 cluster sizes can be much
 480	 * greater than page size, so we have to truncate them
 481	 * anyway.
 482	 */
 483
 484	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 485		unmap_mapping_range(inode->i_mapping,
 486				    new_i_size + PAGE_SIZE - 1, 0, 1);
 487		truncate_inode_pages(inode->i_mapping, new_i_size);
 488		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 489					       i_size_read(inode), 1);
 490		if (status)
 491			mlog_errno(status);
 492
 493		goto bail_unlock_sem;
 494	}
 495
 496	/* Alright, we're going to need to do a full-blown alloc size
 497	 * change. Orphan the inode so that recovery can complete the
 498	 * truncate if necessary. This does the task of marking
 499	 * i_size. */
 500	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 501	if (status < 0) {
 502		mlog_errno(status);
 503		goto bail_unlock_sem;
 504	}
 505
 506	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 507	truncate_inode_pages(inode->i_mapping, new_i_size);
 508
 509	status = ocfs2_commit_truncate(osb, inode, di_bh);
 510	if (status < 0) {
 511		mlog_errno(status);
 512		goto bail_unlock_sem;
 513	}
 514
 515	/* TODO: orphan dir cleanup here. */
 516bail_unlock_sem:
 517	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 518
 519bail:
 520	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 521		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 522
 523	return status;
 524}
 525
 526/*
 527 * Extend file allocation only here.
 528 * We'll update all the disk stuff, and oip->alloc_size.
 529 *
 530 * Expects stuff to be locked, a transaction started and enough data /
 531 * metadata reservations in the contexts.
 532 *
 533 * Will return -EAGAIN, and a reason if a restart is needed.
 534 * If passed in, *reason will always be set, even on error.
 535 */
 536int ocfs2_add_inode_data(struct ocfs2_super *osb,
 537			 struct inode *inode,
 538			 u32 *logical_offset,
 539			 u32 clusters_to_add,
 540			 int mark_unwritten,
 541			 struct buffer_head *fe_bh,
 542			 handle_t *handle,
 543			 struct ocfs2_alloc_context *data_ac,
 544			 struct ocfs2_alloc_context *meta_ac,
 545			 enum ocfs2_alloc_restarted *reason_ret)
 546{
 547	struct ocfs2_extent_tree et;
 548
 549	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 550	return ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 551					   clusters_to_add, mark_unwritten,
 552					   data_ac, meta_ac, reason_ret);
 553}
 554
 555static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 556				   u32 clusters_to_add, int mark_unwritten)
 557{
 558	int status = 0;
 559	int restart_func = 0;
 560	int credits;
 561	u32 prev_clusters;
 562	struct buffer_head *bh = NULL;
 563	struct ocfs2_dinode *fe = NULL;
 564	handle_t *handle = NULL;
 565	struct ocfs2_alloc_context *data_ac = NULL;
 566	struct ocfs2_alloc_context *meta_ac = NULL;
 567	enum ocfs2_alloc_restarted why = RESTART_NONE;
 568	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 569	struct ocfs2_extent_tree et;
 570	int did_quota = 0;
 571
 572	/*
 573	 * Unwritten extents only exist for file systems which
 574	 * support holes.
 575	 */
 576	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 577
 578	status = ocfs2_read_inode_block(inode, &bh);
 579	if (status < 0) {
 580		mlog_errno(status);
 581		goto leave;
 582	}
 583	fe = (struct ocfs2_dinode *) bh->b_data;
 584
 585restart_all:
 586	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 587
 588	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 589	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 590				       &data_ac, &meta_ac);
 591	if (status) {
 592		mlog_errno(status);
 593		goto leave;
 594	}
 595
 596	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 597	handle = ocfs2_start_trans(osb, credits);
 598	if (IS_ERR(handle)) {
 599		status = PTR_ERR(handle);
 600		handle = NULL;
 601		mlog_errno(status);
 602		goto leave;
 603	}
 604
 605restarted_transaction:
 606	trace_ocfs2_extend_allocation(
 607		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 608		(unsigned long long)i_size_read(inode),
 609		le32_to_cpu(fe->i_clusters), clusters_to_add,
 610		why, restart_func);
 611
 612	status = dquot_alloc_space_nodirty(inode,
 613			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 614	if (status)
 615		goto leave;
 616	did_quota = 1;
 617
 618	/* reserve a write to the file entry early on - so that if we
 619	 * run out of credits in the allocation path, we can still
 620	 * update i_size. */
 621	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 622					 OCFS2_JOURNAL_ACCESS_WRITE);
 623	if (status < 0) {
 624		mlog_errno(status);
 625		goto leave;
 626	}
 627
 628	prev_clusters = OCFS2_I(inode)->ip_clusters;
 629
 630	status = ocfs2_add_inode_data(osb,
 631				      inode,
 632				      &logical_start,
 633				      clusters_to_add,
 634				      mark_unwritten,
 635				      bh,
 636				      handle,
 637				      data_ac,
 638				      meta_ac,
 639				      &why);
 640	if ((status < 0) && (status != -EAGAIN)) {
 641		if (status != -ENOSPC)
 642			mlog_errno(status);
 643		goto leave;
 644	}
 645	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 646	ocfs2_journal_dirty(handle, bh);
 647
 648	spin_lock(&OCFS2_I(inode)->ip_lock);
 649	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 650	spin_unlock(&OCFS2_I(inode)->ip_lock);
 651	/* Release unused quota reservation */
 652	dquot_free_space(inode,
 653			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 654	did_quota = 0;
 655
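	/*
	 * The extent insert can request a restart: RESTART_META means we
	 * must drop our reservations and redo the whole allocation setup,
	 * while RESTART_TRANS only needs more journal credits, so the
	 * handle is extended and the insert retried in place.
	 */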
 656	if (why != RESTART_NONE && clusters_to_add) {
 657		if (why == RESTART_META) {
 658			restart_func = 1;
 659			status = 0;
 660		} else {
 661			BUG_ON(why != RESTART_TRANS);
 662
 663			status = ocfs2_allocate_extend_trans(handle, 1);
 664			if (status < 0) {
 665				/* handle still has to be committed at
 666				 * this point. */
 667				status = -ENOMEM;
 668				mlog_errno(status);
 669				goto leave;
 670			}
 671			goto restarted_transaction;
 672		}
 673	}
 674
 675	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 676	     le32_to_cpu(fe->i_clusters),
 677	     (unsigned long long)le64_to_cpu(fe->i_size),
 678	     OCFS2_I(inode)->ip_clusters,
 679	     (unsigned long long)i_size_read(inode));
 680
 681leave:
 682	if (status < 0 && did_quota)
 683		dquot_free_space(inode,
 684			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 685	if (handle) {
 686		ocfs2_commit_trans(osb, handle);
 687		handle = NULL;
 688	}
 689	if (data_ac) {
 690		ocfs2_free_alloc_context(data_ac);
 691		data_ac = NULL;
 692	}
 693	if (meta_ac) {
 694		ocfs2_free_alloc_context(meta_ac);
 695		meta_ac = NULL;
 696	}
 697	if ((!status) && restart_func) {
 698		restart_func = 0;
 699		goto restart_all;
 700	}
 701	brelse(bh);
 702	bh = NULL;
 703
 704	return status;
 705}
 706
 707/*
 708 * While a write will already be ordering the data, a truncate will not.
 709 * Thus, we need to explicitly order the zeroed pages.
 710 */
 711static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
 712						      struct buffer_head *di_bh,
 713						      loff_t start_byte,
 714						      loff_t length)
 715{
 716	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 717	handle_t *handle = NULL;
 718	int ret = 0;
 719
 720	if (!ocfs2_should_order_data(inode))
 721		goto out;
 722
 723	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 724	if (IS_ERR(handle)) {
 725		ret = -ENOMEM;
 726		mlog_errno(ret);
 727		goto out;
 728	}
 729
 730	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
 731	if (ret < 0) {
 732		mlog_errno(ret);
 733		goto out;
 734	}
 735
 736	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 737				      OCFS2_JOURNAL_ACCESS_WRITE);
 738	if (ret)
 739		mlog_errno(ret);
 740	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 741
 742out:
 743	if (ret) {
 744		if (!IS_ERR(handle))
 745			ocfs2_commit_trans(osb, handle);
 746		handle = ERR_PTR(ret);
 747	}
 748	return handle;
 749}
 750
 751/* Some parts of this taken from generic_cont_expand, which turned out
 752 * to be too fragile to do exactly what we need without us having to
 753 * worry about recursive locking in ->write_begin() and ->write_end(). */
 754static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 755				 u64 abs_to, struct buffer_head *di_bh)
 756{
 757	struct address_space *mapping = inode->i_mapping;
 758	struct folio *folio;
 759	unsigned long index = abs_from >> PAGE_SHIFT;
 760	handle_t *handle;
 761	int ret = 0;
 762	unsigned zero_from, zero_to, block_start, block_end;
 763	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 764
 765	BUG_ON(abs_from >= abs_to);
 766	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 767	BUG_ON(abs_from & (inode->i_blkbits - 1));
 768
 769	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
 770						      abs_from,
 771						      abs_to - abs_from);
 772	if (IS_ERR(handle)) {
 773		ret = PTR_ERR(handle);
 774		goto out;
 775	}
 776
 777	folio = __filemap_get_folio(mapping, index,
 778			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
 779	if (IS_ERR(folio)) {
 780		ret = PTR_ERR(folio);
 781		mlog_errno(ret);
 782		goto out_commit_trans;
 783	}
 784
 785	/* Get the offsets within the page that we want to zero */
 786	zero_from = abs_from & (PAGE_SIZE - 1);
 787	zero_to = abs_to & (PAGE_SIZE - 1);
 788	if (!zero_to)
 789		zero_to = PAGE_SIZE;
 790
 791	trace_ocfs2_write_zero_page(
 792			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 793			(unsigned long long)abs_from,
 794			(unsigned long long)abs_to,
 795			index, zero_from, zero_to);
 796
 797	/* We know that zero_from is block aligned */
 798	for (block_start = zero_from; block_start < zero_to;
 799	     block_start = block_end) {
 800		block_end = block_start + i_blocksize(inode);
 801
 802		/*
 803		 * block_start is block-aligned.  Bump it by one to force
 804		 * __block_write_begin and block_commit_write to zero the
 805		 * whole block.
 806		 */
 807		ret = __block_write_begin(folio, block_start + 1, 0,
 808					  ocfs2_get_block);
 809		if (ret < 0) {
 810			mlog_errno(ret);
 811			goto out_unlock;
 812		}
 813
 814
 815		/* must not update i_size! */
 816		block_commit_write(&folio->page, block_start + 1, block_start + 1);
 817	}
 818
 819	/*
 820	 * fs-writeback will release, without taking the page lock, dirty
 821	 * pages whose offset is beyond the inode size; the release happens
 822	 * in block_write_full_folio().
 823	 */
 824	i_size_write(inode, abs_to);
 825	inode->i_blocks = ocfs2_inode_sector_count(inode);
 826	di->i_size = cpu_to_le64((u64)i_size_read(inode));
 827	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 828	di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
 829	di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
 830	di->i_mtime_nsec = di->i_ctime_nsec;
 831	if (handle) {
 832		ocfs2_journal_dirty(handle, di_bh);
 833		ocfs2_update_inode_fsync_trans(handle, inode, 1);
 834	}
 835
 836out_unlock:
 837	folio_unlock(folio);
 838	folio_put(folio);
 839out_commit_trans:
 840	if (handle)
 841		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 842out:
 843	return ret;
 844}
 845
 846/*
 847 * Find the next range to zero.  We do this in terms of bytes because
 848 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 849 * pagecache.  We may return multiple extents.
 850 *
 851 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 852 * needs to be zeroed.  range_start and range_end return the next zeroing
 853 * range.  A subsequent call should pass the previous range_end as its
 854 * zero_start.  If range_end is 0, there's nothing to do.
 855 *
 856 * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
 857 */
 858static int ocfs2_zero_extend_get_range(struct inode *inode,
 859				       struct buffer_head *di_bh,
 860				       u64 zero_start, u64 zero_end,
 861				       u64 *range_start, u64 *range_end)
 862{
 863	int rc = 0, needs_cow = 0;
 864	u32 p_cpos, zero_clusters = 0;
 865	u32 zero_cpos =
 866		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 867	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 868	unsigned int num_clusters = 0;
 869	unsigned int ext_flags = 0;
 870
 871	while (zero_cpos < last_cpos) {
 872		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 873					&num_clusters, &ext_flags);
 874		if (rc) {
 875			mlog_errno(rc);
 876			goto out;
 877		}
 878
 879		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 880			zero_clusters = num_clusters;
 881			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 882				needs_cow = 1;
 883			break;
 884		}
 885
 886		zero_cpos += num_clusters;
 887	}
 888	if (!zero_clusters) {
 889		*range_end = 0;
 890		goto out;
 891	}
 892
 893	while ((zero_cpos + zero_clusters) < last_cpos) {
 894		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 895					&p_cpos, &num_clusters,
 896					&ext_flags);
 897		if (rc) {
 898			mlog_errno(rc);
 899			goto out;
 900		}
 901
 902		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 903			break;
 904		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 905			needs_cow = 1;
 906		zero_clusters += num_clusters;
 907	}
 908	if ((zero_cpos + zero_clusters) > last_cpos)
 909		zero_clusters = last_cpos - zero_cpos;
 910
 911	if (needs_cow) {
 912		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 913					zero_clusters, UINT_MAX);
 914		if (rc) {
 915			mlog_errno(rc);
 916			goto out;
 917		}
 918	}
 919
 920	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 921	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 922					     zero_cpos + zero_clusters);
 923
 924out:
 925	return rc;
 926}
 927
 928/*
 929 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 930 * has made sure that the entire range needs zeroing.
 931 */
 932static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 933				   u64 range_end, struct buffer_head *di_bh)
 934{
 935	int rc = 0;
 936	u64 next_pos;
 937	u64 zero_pos = range_start;
 938
 939	trace_ocfs2_zero_extend_range(
 940			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 941			(unsigned long long)range_start,
 942			(unsigned long long)range_end);
 943	BUG_ON(range_start >= range_end);
 944
 945	while (zero_pos < range_end) {
 946		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 947		if (next_pos > range_end)
 948			next_pos = range_end;
 949		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 950		if (rc < 0) {
 951			mlog_errno(rc);
 952			break;
 953		}
 954		zero_pos = next_pos;
 955
 956		/*
 957		 * Very large extends have the potential to lock up
 958		 * the cpu for extended periods of time.
 959		 */
 960		cond_resched();
 961	}
 962
 963	return rc;
 964}
 965
 966int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 967		      loff_t zero_to_size)
 968{
 969	int ret = 0;
 970	u64 zero_start, range_start = 0, range_end = 0;
 971	struct super_block *sb = inode->i_sb;
 972
 973	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 974	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 975				(unsigned long long)zero_start,
 976				(unsigned long long)i_size_read(inode));
 977	while (zero_start < zero_to_size) {
 978		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 979						  zero_to_size,
 980						  &range_start,
 981						  &range_end);
 982		if (ret) {
 983			mlog_errno(ret);
 984			break;
 985		}
 986		if (!range_end)
 987			break;
 988		/* Trim the ends */
 989		if (range_start < zero_start)
 990			range_start = zero_start;
 991		if (range_end > zero_to_size)
 992			range_end = zero_to_size;
 993
 994		ret = ocfs2_zero_extend_range(inode, range_start,
 995					      range_end, di_bh);
 996		if (ret) {
 997			mlog_errno(ret);
 998			break;
 999		}
1000		zero_start = range_end;
1001	}
1002
1003	return ret;
1004}
1005
1006int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1007			  u64 new_i_size, u64 zero_to)
1008{
1009	int ret;
1010	u32 clusters_to_add;
1011	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1012
1013	/*
1014	 * Only quota files call this without a bh, and they can't be
1015	 * refcounted.
1016	 */
1017	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1018	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1019
1020	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1021	if (clusters_to_add < oi->ip_clusters)
1022		clusters_to_add = 0;
1023	else
1024		clusters_to_add -= oi->ip_clusters;
1025
1026	if (clusters_to_add) {
1027		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
1028					      clusters_to_add, 0);
1029		if (ret) {
1030			mlog_errno(ret);
1031			goto out;
1032		}
1033	}
1034
1035	/*
1036	 * Call this even if we don't add any clusters to the tree. We
1037	 * still need to zero the area between the old i_size and the
1038	 * new i_size.
1039	 */
1040	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1041	if (ret < 0)
1042		mlog_errno(ret);
1043
1044out:
1045	return ret;
1046}
1047
1048static int ocfs2_extend_file(struct inode *inode,
1049			     struct buffer_head *di_bh,
1050			     u64 new_i_size)
1051{
1052	int ret = 0;
1053	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1054
1055	BUG_ON(!di_bh);
1056
1057	/* setattr sometimes calls us like this. */
1058	if (new_i_size == 0)
1059		goto out;
1060
1061	if (i_size_read(inode) == new_i_size)
1062		goto out;
1063	BUG_ON(new_i_size < i_size_read(inode));
1064
1065	/*
1066	 * The alloc sem blocks people in read/write from reading our
1067	 * allocation until we're done changing it. We depend on
1068	 * i_rwsem to block other extend/truncate calls while we're
1069	 * here.  We even have to hold it for sparse files because there
1070	 * might be some tail zeroing.
1071	 */
1072	down_write(&oi->ip_alloc_sem);
1073
1074	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1075		/*
1076		 * We can optimize small extends by keeping the inode's
1077		 * inline data.
1078		 */
1079		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1080			up_write(&oi->ip_alloc_sem);
1081			goto out_update_size;
1082		}
1083
1084		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1085		if (ret) {
1086			up_write(&oi->ip_alloc_sem);
1087			mlog_errno(ret);
1088			goto out;
1089		}
1090	}
1091
1092	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1093		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1094	else
1095		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1096					    new_i_size);
1097
1098	up_write(&oi->ip_alloc_sem);
1099
1100	if (ret < 0) {
1101		mlog_errno(ret);
1102		goto out;
1103	}
1104
1105out_update_size:
1106	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1107	if (ret < 0)
1108		mlog_errno(ret);
1109
1110out:
1111	return ret;
1112}
1113
1114int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1115		  struct iattr *attr)
1116{
1117	int status = 0, size_change;
1118	int inode_locked = 0;
1119	struct inode *inode = d_inode(dentry);
1120	struct super_block *sb = inode->i_sb;
1121	struct ocfs2_super *osb = OCFS2_SB(sb);
1122	struct buffer_head *bh = NULL;
1123	handle_t *handle = NULL;
1124	struct dquot *transfer_to[MAXQUOTAS] = { };
1125	int qtype;
1126	int had_lock;
1127	struct ocfs2_lock_holder oh;
1128
1129	trace_ocfs2_setattr(inode, dentry,
1130			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1131			    dentry->d_name.len, dentry->d_name.name,
1132			    attr->ia_valid,
1133				attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0,
1134				attr->ia_valid & ATTR_UID ?
1135					from_kuid(&init_user_ns, attr->ia_uid) : 0,
1136				attr->ia_valid & ATTR_GID ?
1137					from_kgid(&init_user_ns, attr->ia_gid) : 0);
1138
1139	/* ensuring we don't even attempt to truncate a symlink */
1140	if (S_ISLNK(inode->i_mode))
1141		attr->ia_valid &= ~ATTR_SIZE;
1142
1143#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1144			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1145	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1146		return 0;
1147
1148	status = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1149	if (status)
1150		return status;
1151
1152	if (is_quota_modification(&nop_mnt_idmap, inode, attr)) {
1153		status = dquot_initialize(inode);
1154		if (status)
1155			return status;
1156	}
1157	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1158	if (size_change) {
1159		/*
1160		 * Here we should wait for dio to finish before taking the
1161		 * inode lock, to avoid a deadlock between ocfs2_setattr()
1162		 * and ocfs2_dio_end_io_write().
1163		 */
1164		inode_dio_wait(inode);
1165
1166		status = ocfs2_rw_lock(inode, 1);
1167		if (status < 0) {
1168			mlog_errno(status);
1169			goto bail;
1170		}
1171	}
1172
1173	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1174	if (had_lock < 0) {
1175		status = had_lock;
1176		goto bail_unlock_rw;
1177	} else if (had_lock) {
1178		/*
1179		 * As far as we know, ocfs2_setattr() is only ever the first
1180		 * VFS entry point in a call chain that hits the recursive
1181		 * cluster locking issue.
1182		 *
1183		 * For instance:
1184		 * chmod_common()
1185		 *  notify_change()
1186		 *   ocfs2_setattr()
1187		 *    posix_acl_chmod()
1188		 *     ocfs2_iop_get_acl()
1189		 *
1190		 * But, we're not 100% sure if it's always true, because the
1191		 * ordering of the VFS entry points in the call chain is out
1192		 * of our control. So, we'd better dump the stack here to
1193		 * catch the other cases of recursive locking.
1194		 */
1195		mlog(ML_ERROR, "Another case of recursive locking:\n");
1196		dump_stack();
1197	}
1198	inode_locked = 1;
1199
1200	if (size_change) {
1201		status = inode_newsize_ok(inode, attr->ia_size);
1202		if (status)
1203			goto bail_unlock;
1204
1205		if (i_size_read(inode) >= attr->ia_size) {
1206			if (ocfs2_should_order_data(inode)) {
1207				status = ocfs2_begin_ordered_truncate(inode,
1208								      attr->ia_size);
1209				if (status)
1210					goto bail_unlock;
1211			}
1212			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1213		} else
1214			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1215		if (status < 0) {
1216			if (status != -ENOSPC)
1217				mlog_errno(status);
1218			status = -ENOSPC;
1219			goto bail_unlock;
1220		}
1221	}
1222
1223	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1224	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1225		/*
1226		 * Gather pointers to quota structures so that allocation /
1227		 * freeing of quota structures happens here and not inside
1228		 * dquot_transfer() where we have problems with lock ordering
1229		 */
1230		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1231		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1232		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1233			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1234			if (IS_ERR(transfer_to[USRQUOTA])) {
1235				status = PTR_ERR(transfer_to[USRQUOTA]);
1236				transfer_to[USRQUOTA] = NULL;
1237				goto bail_unlock;
1238			}
1239		}
1240		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1241		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1242		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1243			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1244			if (IS_ERR(transfer_to[GRPQUOTA])) {
1245				status = PTR_ERR(transfer_to[GRPQUOTA]);
1246				transfer_to[GRPQUOTA] = NULL;
1247				goto bail_unlock;
1248			}
1249		}
1250		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1251		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1252					   2 * ocfs2_quota_trans_credits(sb));
1253		if (IS_ERR(handle)) {
1254			status = PTR_ERR(handle);
1255			mlog_errno(status);
1256			goto bail_unlock_alloc;
1257		}
1258		status = __dquot_transfer(inode, transfer_to);
1259		if (status < 0)
1260			goto bail_commit;
1261	} else {
1262		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1263		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1264		if (IS_ERR(handle)) {
1265			status = PTR_ERR(handle);
1266			mlog_errno(status);
1267			goto bail_unlock_alloc;
1268		}
1269	}
1270
1271	setattr_copy(&nop_mnt_idmap, inode, attr);
1272	mark_inode_dirty(inode);
1273
1274	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1275	if (status < 0)
1276		mlog_errno(status);
1277
1278bail_commit:
1279	ocfs2_commit_trans(osb, handle);
1280bail_unlock_alloc:
1281	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1282bail_unlock:
1283	if (status && inode_locked) {
1284		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1285		inode_locked = 0;
1286	}
1287bail_unlock_rw:
1288	if (size_change)
1289		ocfs2_rw_unlock(inode, 1);
1290bail:
1291
1292	/* Release quota pointers in case we acquired them */
1293	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1294		dqput(transfer_to[qtype]);
1295
1296	if (!status && attr->ia_valid & ATTR_MODE) {
1297		status = ocfs2_acl_chmod(inode, bh);
1298		if (status < 0)
1299			mlog_errno(status);
1300	}
1301	if (inode_locked)
1302		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1303
1304	brelse(bh);
1305	return status;
1306}
1307
1308int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
1309		  struct kstat *stat, u32 request_mask, unsigned int flags)
1310{
1311	struct inode *inode = d_inode(path->dentry);
1312	struct super_block *sb = path->dentry->d_sb;
1313	struct ocfs2_super *osb = sb->s_fs_info;
1314	int err;
1315
1316	err = ocfs2_inode_revalidate(path->dentry);
1317	if (err) {
1318		if (err != -ENOENT)
1319			mlog_errno(err);
1320		goto bail;
1321	}
1322
1323	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1324	/*
1325	 * If there is inline data in the inode, the inode will normally not
1326	 * have data blocks allocated (it may have an external xattr block).
1327	 * Report at least one sector for such files, so tools like tar, rsync,
1328	 * others don't incorrectly think the file is completely sparse.
1329	 */
1330	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1331		stat->blocks += (stat->size + 511)>>9;
1332
1333	/* We set the blksize from the cluster size for performance */
1334	stat->blksize = osb->s_clustersize;
1335
1336bail:
1337	return err;
1338}
1339
1340int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
1341		     int mask)
1342{
1343	int ret, had_lock;
1344	struct ocfs2_lock_holder oh;
1345
1346	if (mask & MAY_NOT_BLOCK)
1347		return -ECHILD;
1348
1349	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1350	if (had_lock < 0) {
1351		ret = had_lock;
1352		goto out;
1353	} else if (had_lock) {
1354		/* See comments in ocfs2_setattr() for details.
1355		 * The call chain of this case could be:
1356		 * do_sys_open()
1357		 *  may_open()
1358		 *   inode_permission()
1359		 *    ocfs2_permission()
1360		 *     ocfs2_iop_get_acl()
1361		 */
1362		mlog(ML_ERROR, "Another case of recursive locking:\n");
1363		dump_stack();
1364	}
1365
1366	ret = generic_permission(&nop_mnt_idmap, inode, mask);
1367
1368	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1369out:
1370	return ret;
1371}
1372
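/*
 * Clear the setuid bit (and the setgid bit when group-execute is set) on
 * the inode, and write the new mode back to the on-disk dinode in 'bh'
 * under a single transaction.
 */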
1373static int __ocfs2_write_remove_suid(struct inode *inode,
1374				     struct buffer_head *bh)
1375{
1376	int ret;
1377	handle_t *handle;
1378	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1379	struct ocfs2_dinode *di;
1380
1381	trace_ocfs2_write_remove_suid(
1382			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1383			inode->i_mode);
1384
1385	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1386	if (IS_ERR(handle)) {
1387		ret = PTR_ERR(handle);
1388		mlog_errno(ret);
1389		goto out;
1390	}
1391
1392	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1393				      OCFS2_JOURNAL_ACCESS_WRITE);
1394	if (ret < 0) {
1395		mlog_errno(ret);
1396		goto out_trans;
1397	}
1398
1399	inode->i_mode &= ~S_ISUID;
1400	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1401		inode->i_mode &= ~S_ISGID;
1402
1403	di = (struct ocfs2_dinode *) bh->b_data;
1404	di->i_mode = cpu_to_le16(inode->i_mode);
1405	ocfs2_update_inode_fsync_trans(handle, inode, 0);
1406
1407	ocfs2_journal_dirty(handle, bh);
1408
1409out_trans:
1410	ocfs2_commit_trans(osb, handle);
1411out:
1412	return ret;
1413}
1414
1415static int ocfs2_write_remove_suid(struct inode *inode)
1416{
1417	int ret;
1418	struct buffer_head *bh = NULL;
1419
1420	ret = ocfs2_read_inode_block(inode, &bh);
1421	if (ret < 0) {
1422		mlog_errno(ret);
1423		goto out;
1424	}
1425
1426	ret =  __ocfs2_write_remove_suid(inode, bh);
1427out:
1428	brelse(bh);
1429	return ret;
1430}
1431
1432/*
1433 * Allocate enough extents to cover the region starting at byte offset
1434 * start for len bytes. Existing extents are skipped; any extents
1435 * added are marked as "unwritten".
1436 */
1437static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1438					    u64 start, u64 len)
1439{
1440	int ret;
1441	u32 cpos, phys_cpos, clusters, alloc_size;
1442	u64 end = start + len;
1443	struct buffer_head *di_bh = NULL;
1444
1445	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1446		ret = ocfs2_read_inode_block(inode, &di_bh);
1447		if (ret) {
1448			mlog_errno(ret);
1449			goto out;
1450		}
1451
1452		/*
1453		 * Nothing to do if the requested reservation range
1454		 * fits within the inode.
1455		 */
1456		if (ocfs2_size_fits_inline_data(di_bh, end))
1457			goto out;
1458
1459		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1460		if (ret) {
1461			mlog_errno(ret);
1462			goto out;
1463		}
1464	}
1465
1466	/*
1467	 * We consider both start and len to be inclusive.
1468	 */
1469	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1470	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1471	clusters -= cpos;
1472
1473	while (clusters) {
1474		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1475					 &alloc_size, NULL);
1476		if (ret) {
1477			mlog_errno(ret);
1478			goto out;
1479		}
1480
1481		/*
1482		 * Hole or existing extent len can be arbitrary, so
1483		 * cap it to our own allocation request.
1484		 */
1485		if (alloc_size > clusters)
1486			alloc_size = clusters;
1487
1488		if (phys_cpos) {
1489			/*
1490			 * We already have an allocation at this
1491			 * region so we can safely skip it.
1492			 */
1493			goto next;
1494		}
1495
1496		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1497		if (ret) {
1498			if (ret != -ENOSPC)
1499				mlog_errno(ret);
1500			goto out;
1501		}
1502
1503next:
1504		cpos += alloc_size;
1505		clusters -= alloc_size;
1506	}
1507
1508	ret = 0;
1509out:
1510
1511	brelse(di_bh);
1512	return ret;
1513}
1514
1515/*
1516 * Truncate a byte range, avoiding pages within partial clusters. This
1517 * preserves those pages for the zeroing code to write to.
1518 */
1519static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1520					 u64 byte_len)
1521{
1522	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1523	loff_t start, end;
1524	struct address_space *mapping = inode->i_mapping;
1525
1526	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1527	end = byte_start + byte_len;
1528	end = end & ~(osb->s_clustersize - 1);
1529
1530	if (start < end) {
1531		unmap_mapping_range(mapping, start, end - start, 0);
1532		truncate_inode_pages_range(mapping, start, end - 1);
1533	}
1534}
1535
1536/*
1537 * Zero out partial blocks within one cluster.
1538 *
1539 * start: file offset where zeroing starts; rounded up to a block boundary.
1540 * len:   trimmed so that "start + len" does not run past the end of the
1541 *        current cluster.
1542 */
1543static int ocfs2_zeroout_partial_cluster(struct inode *inode,
1544					u64 start, u64 len)
1545{
1546	int ret;
1547	u64 start_block, end_block, nr_blocks;
1548	u64 p_block, offset;
1549	u32 cluster, p_cluster, nr_clusters;
1550	struct super_block *sb = inode->i_sb;
1551	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
1552
1553	if (start + len < end)
1554		end = start + len;
1555
1556	start_block = ocfs2_blocks_for_bytes(sb, start);
1557	end_block = ocfs2_blocks_for_bytes(sb, end);
1558	nr_blocks = end_block - start_block;
1559	if (!nr_blocks)
1560		return 0;
1561
1562	cluster = ocfs2_bytes_to_clusters(sb, start);
1563	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
1564				&nr_clusters, NULL);
1565	if (ret)
1566		return ret;
1567	if (!p_cluster)
1568		return 0;
1569
1570	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
1571	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
1572	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
1573}
1574
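/*
 * Zero the parts of any partial clusters at either edge of the range being
 * punched so that stale data is not left exposed; whole clusters inside the
 * range are dealt with by the extent removal code instead.
 */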
1575static int ocfs2_zero_partial_clusters(struct inode *inode,
1576				       u64 start, u64 len)
1577{
1578	int ret = 0;
1579	u64 tmpend = 0;
1580	u64 end = start + len;
1581	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1582	unsigned int csize = osb->s_clustersize;
1583	handle_t *handle;
1584	loff_t isize = i_size_read(inode);
1585
1586	/*
1587	 * The "start" and "end" values are NOT necessarily part of
1588	 * the range whose allocation is being deleted. Rather, this
1589	 * is what the user passed in with the request. We must zero
1590	 * partial clusters here. There's no need to worry about
1591	 * physical allocation - the zeroing code knows to skip holes.
1592	 */
1593	trace_ocfs2_zero_partial_clusters(
1594		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1595		(unsigned long long)start, (unsigned long long)end);
1596
1597	/*
1598	 * If both edges are on a cluster boundary then there's no
1599	 * zeroing required as the region is part of the allocation to
1600	 * be truncated.
1601	 */
1602	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1603		goto out;
1604
1605	/* No page cache for EOF blocks, issue zero out to disk. */
1606	if (end > isize) {
1607		/*
1608		 * Zero out the blocks beyond EOF in the last cluster,
1609		 * starting from "isize" even when "start" > "isize",
1610		 * because zeroing from exactly "start" would be
1611		 * complicated: "start" may not be block aligned, a
1612		 * buffered write would be needed for that, and buffered
1613		 * writes beyond EOF are not supported.
1614		 */
1615		ret = ocfs2_zeroout_partial_cluster(inode, isize,
1616					end - isize);
1617		if (ret) {
1618			mlog_errno(ret);
1619			goto out;
1620		}
1621		if (start >= isize)
1622			goto out;
1623		end = isize;
1624	}
1625	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1626	if (IS_ERR(handle)) {
1627		ret = PTR_ERR(handle);
1628		mlog_errno(ret);
1629		goto out;
1630	}
1631
1632	/*
1633	 * If start is on a cluster boundary and end is somewhere in another
1634	 * cluster, we have not COWed the cluster starting at start, unless
1635	 * end is also within the same cluster. So, in this case, we skip this
1636	 * first call to ocfs2_zero_range_for_truncate() and move on
1637	 * to the next one.
1638	 */
1639	if ((start & (csize - 1)) != 0) {
1640		/*
1641		 * We want to get the byte offset of the end of the 1st
1642		 * cluster.
1643		 */
1644		tmpend = (u64)osb->s_clustersize +
1645			(start & ~(osb->s_clustersize - 1));
1646		if (tmpend > end)
1647			tmpend = end;
1648
1649		trace_ocfs2_zero_partial_clusters_range1(
1650			(unsigned long long)start,
1651			(unsigned long long)tmpend);
1652
1653		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1654						    tmpend);
1655		if (ret)
1656			mlog_errno(ret);
1657	}
1658
1659	if (tmpend < end) {
1660		/*
1661		 * This may make start and end equal, but the zeroing
1662		 * code will skip any work in that case so there's no
1663		 * need to catch it up here.
1664		 */
1665		start = end & ~(osb->s_clustersize - 1);
1666
1667		trace_ocfs2_zero_partial_clusters_range2(
1668			(unsigned long long)start, (unsigned long long)end);
1669
1670		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1671		if (ret)
1672			mlog_errno(ret);
1673	}
1674	ocfs2_update_inode_fsync_trans(handle, inode, 1);
1675
1676	ocfs2_commit_trans(osb, handle);
1677out:
1678	return ret;
1679}
1680
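/*
 * Return the index of the last extent record in 'el' whose cluster offset
 * is below 'pos', or -1 if every record starts at or beyond 'pos'.
 */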
1681static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1682{
1683	int i;
1684	struct ocfs2_extent_rec *rec = NULL;
1685
1686	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1687
1688		rec = &el->l_recs[i];
1689
1690		if (le32_to_cpu(rec->e_cpos) < pos)
1691			break;
1692	}
1693
1694	return i;
1695}
1696
1697/*
1698 * Helper to calculate the punching position and length in one run. We
1699 * handle the following three cases in order:
1700 *
1701 * - remove the entire record
1702 * - remove a partial record
1703 * - no record needs to be removed (hole-punching completed)
1704 */
1705static void ocfs2_calc_trunc_pos(struct inode *inode,
1706				 struct ocfs2_extent_list *el,
1707				 struct ocfs2_extent_rec *rec,
1708				 u32 trunc_start, u32 *trunc_cpos,
1709				 u32 *trunc_len, u32 *trunc_end,
1710				 u64 *blkno, int *done)
1711{
1712	int ret = 0;
1713	u32 coff, range;
1714
1715	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1716
1717	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1718		/*
1719		 * remove an entire extent record.
1720		 */
1721		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1722		/*
1723		 * Skip holes if any.
1724		 */
1725		if (range < *trunc_end)
1726			*trunc_end = range;
1727		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1728		*blkno = le64_to_cpu(rec->e_blkno);
1729		*trunc_end = le32_to_cpu(rec->e_cpos);
1730	} else if (range > trunc_start) {
1731		/*
1732		 * remove a partial extent record, which means we're
1733		 * removing the last extent record.
1734		 */
1735		*trunc_cpos = trunc_start;
1736		/*
1737		 * skip hole if any.
1738		 */
1739		if (range < *trunc_end)
1740			*trunc_end = range;
1741		*trunc_len = *trunc_end - trunc_start;
1742		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1743		*blkno = le64_to_cpu(rec->e_blkno) +
1744				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1745		*trunc_end = trunc_start;
1746	} else {
1747		/*
1748		 * There are two possibilities here:
1749		 *
1750		 * - last record has been removed
1751		 * - trunc_start was within a hole
1752		 *
1753		 * Either case means that hole punching is complete.
1754		 */
1755		ret = 1;
1756	}
1757
1758	*done = ret;
1759}
1760
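/*
 * Punch a hole of byte_len bytes at byte_start. Inline data is simply
 * truncated; otherwise the partial clusters at the edges are zeroed and
 * the whole clusters in between are removed from the extent tree and
 * handed to the truncate log / dealloc context for freeing.
 */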
1761int ocfs2_remove_inode_range(struct inode *inode,
1762			     struct buffer_head *di_bh, u64 byte_start,
1763			     u64 byte_len)
1764{
1765	int ret = 0, flags = 0, done = 0, i;
1766	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1767	u32 cluster_in_el;
1768	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1769	struct ocfs2_cached_dealloc_ctxt dealloc;
1770	struct address_space *mapping = inode->i_mapping;
1771	struct ocfs2_extent_tree et;
1772	struct ocfs2_path *path = NULL;
1773	struct ocfs2_extent_list *el = NULL;
1774	struct ocfs2_extent_rec *rec = NULL;
1775	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1776	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1777
1778	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1779	ocfs2_init_dealloc_ctxt(&dealloc);
1780
1781	trace_ocfs2_remove_inode_range(
1782			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1783			(unsigned long long)byte_start,
1784			(unsigned long long)byte_len);
1785
1786	if (byte_len == 0)
1787		return 0;
1788
1789	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1790		int id_count = ocfs2_max_inline_data_with_xattr(inode->i_sb, di);
1791
1792		if (byte_start > id_count || byte_start + byte_len > id_count) {
1793			ret = -EINVAL;
1794			mlog_errno(ret);
1795			goto out;
1796		}
1797
1798		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1799					    byte_start + byte_len, 0);
1800		if (ret) {
1801			mlog_errno(ret);
1802			goto out;
1803		}
1804		/*
1805		 * There's no need to get fancy with the page cache
1806		 * truncate of an inline-data inode. We're talking
1807		 * about less than a page here, which will be cached
1808		 * in the dinode buffer anyway.
1809		 */
1810		unmap_mapping_range(mapping, 0, 0, 0);
1811		truncate_inode_pages(mapping, 0);
1812		goto out;
1813	}
1814
1815	/*
1816	 * For reflinks, we may need to CoW the two clusters which might be
1817	 * partially zeroed later, if the hole's start and end offsets fall
1818	 * within one cluster (i.e. are not exactly aligned to the cluster size).
1819	 */
1820
1821	if (ocfs2_is_refcount_inode(inode)) {
1822		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1823		if (ret) {
1824			mlog_errno(ret);
1825			goto out;
1826		}
1827
1828		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1829		if (ret) {
1830			mlog_errno(ret);
1831			goto out;
1832		}
1833	}
1834
1835	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1836	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1837	cluster_in_el = trunc_end;
1838
1839	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1840	if (ret) {
1841		mlog_errno(ret);
1842		goto out;
1843	}
1844
1845	path = ocfs2_new_path_from_et(&et);
1846	if (!path) {
1847		ret = -ENOMEM;
1848		mlog_errno(ret);
1849		goto out;
1850	}
1851
1852	while (trunc_end > trunc_start) {
1853
1854		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1855				      cluster_in_el);
1856		if (ret) {
1857			mlog_errno(ret);
1858			goto out;
1859		}
1860
1861		el = path_leaf_el(path);
1862
1863		i = ocfs2_find_rec(el, trunc_end);
1864		/*
1865		 * Need to go to previous extent block.
1866		 */
1867		if (i < 0) {
1868			if (path->p_tree_depth == 0)
1869				break;
1870
1871			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1872							    path,
1873							    &cluster_in_el);
1874			if (ret) {
1875				mlog_errno(ret);
1876				goto out;
1877			}
1878
1879			/*
1880			 * We've reached the leftmost extent block;
1881			 * it's safe to leave.
1882			 */
1883			if (cluster_in_el == 0)
1884				break;
1885
1886			/*
1887			 * The 'pos' found for the previous extent block is
1888			 * always one cluster less than the actual trunc_end.
1889			 */
1890			trunc_end = cluster_in_el + 1;
1891
1892			ocfs2_reinit_path(path, 1);
1893
1894			continue;
1895
1896		} else
1897			rec = &el->l_recs[i];
1898
1899		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1900				     &trunc_len, &trunc_end, &blkno, &done);
1901		if (done)
1902			break;
1903
1904		flags = rec->e_flags;
1905		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1906
1907		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1908					       phys_cpos, trunc_len, flags,
1909					       &dealloc, refcount_loc, false);
1910		if (ret < 0) {
1911			mlog_errno(ret);
1912			goto out;
1913		}
1914
1915		cluster_in_el = trunc_end;
1916
1917		ocfs2_reinit_path(path, 1);
1918	}
1919
1920	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1921
1922out:
1923	ocfs2_free_path(path);
1924	ocfs2_schedule_truncate_log_flush(osb, 1);
1925	ocfs2_run_deallocs(osb, &dealloc);
1926
1927	return ret;
1928}
1929
1930/*
1931 * Parts of this function taken from xfs_change_file_space()
1932 */
1933static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1934				     loff_t f_pos, unsigned int cmd,
1935				     struct ocfs2_space_resv *sr,
1936				     int change_size)
1937{
1938	int ret;
1939	s64 llen;
1940	loff_t size, orig_isize;
1941	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1942	struct buffer_head *di_bh = NULL;
1943	handle_t *handle;
1944	unsigned long long max_off = inode->i_sb->s_maxbytes;
1945
1946	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1947		return -EROFS;
1948
1949	inode_lock(inode);
1950
1951	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
1952	inode_dio_wait(inode);
1953	/*
1954	 * This prevents concurrent writes on other nodes
1955	 */
1956	ret = ocfs2_rw_lock(inode, 1);
1957	if (ret) {
1958		mlog_errno(ret);
1959		goto out;
1960	}
1961
1962	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1963	if (ret) {
1964		mlog_errno(ret);
1965		goto out_rw_unlock;
1966	}
1967
1968	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1969		ret = -EPERM;
1970		goto out_inode_unlock;
1971	}
1972
1973	switch (sr->l_whence) {
1974	case 0: /*SEEK_SET*/
1975		break;
1976	case 1: /*SEEK_CUR*/
1977		sr->l_start += f_pos;
1978		break;
1979	case 2: /*SEEK_END*/
1980		sr->l_start += i_size_read(inode);
1981		break;
1982	default:
1983		ret = -EINVAL;
1984		goto out_inode_unlock;
1985	}
1986	sr->l_whence = 0;
1987
1988	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1989
1990	if (sr->l_start < 0
1991	    || sr->l_start > max_off
1992	    || (sr->l_start + llen) < 0
1993	    || (sr->l_start + llen) > max_off) {
1994		ret = -EINVAL;
1995		goto out_inode_unlock;
1996	}
1997	size = sr->l_start + sr->l_len;
1998
1999	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
2000	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
2001		if (sr->l_len <= 0) {
2002			ret = -EINVAL;
2003			goto out_inode_unlock;
2004		}
2005	}
2006
2007	if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) {
2008		ret = __ocfs2_write_remove_suid(inode, di_bh);
2009		if (ret) {
2010			mlog_errno(ret);
2011			goto out_inode_unlock;
2012		}
2013	}
2014
2015	down_write(&OCFS2_I(inode)->ip_alloc_sem);
2016	switch (cmd) {
2017	case OCFS2_IOC_RESVSP:
2018	case OCFS2_IOC_RESVSP64:
2019		/*
2020		 * This takes unsigned offsets, but the signed ones we
2021		 * pass have been checked against overflow above.
2022		 */
2023		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
2024						       sr->l_len);
2025		break;
2026	case OCFS2_IOC_UNRESVSP:
2027	case OCFS2_IOC_UNRESVSP64:
2028		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
2029					       sr->l_len);
2030		break;
2031	default:
2032		ret = -EINVAL;
2033	}
2034
2035	orig_isize = i_size_read(inode);
2036	/* zeroout eof blocks in the cluster. */
2037	if (!ret && change_size && orig_isize < size) {
2038		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
2039					size - orig_isize);
2040		if (!ret)
2041			i_size_write(inode, size);
2042	}
2043	up_write(&OCFS2_I(inode)->ip_alloc_sem);
2044	if (ret) {
2045		mlog_errno(ret);
2046		goto out_inode_unlock;
2047	}
2048
2049	/*
2050	 * We update c/mtime for these changes
2051	 */
2052	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2053	if (IS_ERR(handle)) {
2054		ret = PTR_ERR(handle);
2055		mlog_errno(ret);
2056		goto out_inode_unlock;
2057	}
2058
2059	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2060	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2061	if (ret < 0)
2062		mlog_errno(ret);
2063
2064	if (file && (file->f_flags & O_SYNC))
2065		handle->h_sync = 1;
2066
2067	ocfs2_commit_trans(osb, handle);
2068
2069out_inode_unlock:
2070	brelse(di_bh);
2071	ocfs2_inode_unlock(inode, 1);
2072out_rw_unlock:
2073	ocfs2_rw_unlock(inode, 1);
2074
2075out:
2076	inode_unlock(inode);
2077	return ret;
2078}
2079
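/*
 * ioctl entry point for OCFS2_IOC_RESVSP(64) and OCFS2_IOC_UNRESVSP(64):
 * check that the filesystem supports the request, then reserve or
 * unreserve the space via __ocfs2_change_file_space().
 */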
2080int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2081			    struct ocfs2_space_resv *sr)
2082{
2083	struct inode *inode = file_inode(file);
2084	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2085	int ret;
2086
2087	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2088	    !ocfs2_writes_unwritten_extents(osb))
2089		return -ENOTTY;
2090	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2091		 !ocfs2_sparse_alloc(osb))
2092		return -ENOTTY;
2093
2094	if (!S_ISREG(inode->i_mode))
2095		return -EINVAL;
2096
2097	if (!(file->f_mode & FMODE_WRITE))
2098		return -EBADF;
2099
2100	ret = mnt_want_write_file(file);
2101	if (ret)
2102		return ret;
2103	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2104	mnt_drop_write_file(file);
2105	return ret;
2106}
2107
2108static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2109			    loff_t len)
2110{
2111	struct inode *inode = file_inode(file);
2112	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2113	struct ocfs2_space_resv sr;
2114	int change_size = 1;
2115	int cmd = OCFS2_IOC_RESVSP64;
2116	int ret = 0;
2117
2118	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2119		return -EOPNOTSUPP;
2120	if (!ocfs2_writes_unwritten_extents(osb))
2121		return -EOPNOTSUPP;
2122
2123	if (mode & FALLOC_FL_KEEP_SIZE) {
2124		change_size = 0;
2125	} else {
2126		ret = inode_newsize_ok(inode, offset + len);
2127		if (ret)
2128			return ret;
2129	}
2130
2131	if (mode & FALLOC_FL_PUNCH_HOLE)
2132		cmd = OCFS2_IOC_UNRESVSP64;
2133
2134	sr.l_whence = 0;
2135	sr.l_start = (s64)offset;
2136	sr.l_len = (s64)len;
2137
2138	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2139					 change_size);
2140}
2141
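/*
 * Return 1 if any cluster in the byte range [pos, pos + count) is marked
 * refcounted (shared by a reflink), 0 if none are, or a negative error.
 */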
2142int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2143				   size_t count)
2144{
2145	int ret = 0;
2146	unsigned int extent_flags;
2147	u32 cpos, clusters, extent_len, phys_cpos;
2148	struct super_block *sb = inode->i_sb;
2149
2150	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2151	    !ocfs2_is_refcount_inode(inode) ||
2152	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2153		return 0;
2154
2155	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2156	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2157
2158	while (clusters) {
2159		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2160					 &extent_flags);
2161		if (ret < 0) {
2162			mlog_errno(ret);
2163			goto out;
2164		}
2165
2166		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2167			ret = 1;
2168			break;
2169		}
2170
2171		if (extent_len > clusters)
2172			extent_len = clusters;
2173
2174		clusters -= extent_len;
2175		cpos += extent_len;
2176	}
2177out:
2178	return ret;
2179}
2180
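/* Return 1 if the I/O range is not aligned to the fs block size, 0 if it is. */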
2181static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2182{
2183	int blockmask = inode->i_sb->s_blocksize - 1;
2184	loff_t final_size = pos + count;
2185
2186	if ((pos & blockmask) || (final_size & blockmask))
2187		return 1;
2188	return 0;
2189}
2190
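/*
 * Take the cluster inode lock at 'meta_level' and then ip_alloc_sem (for
 * write when 'write_sem' is set, otherwise for read). When 'wait' is zero
 * both locks are only tried, and -EAGAIN is returned on contention.
 */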
2191static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2192					    struct buffer_head **di_bh,
2193					    int meta_level,
2194					    int write_sem,
2195					    int wait)
2196{
2197	int ret = 0;
2198
2199	if (wait)
2200		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
2201	else
2202		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
2203	if (ret < 0)
2204		goto out;
2205
2206	if (wait) {
2207		if (write_sem)
2208			down_write(&OCFS2_I(inode)->ip_alloc_sem);
2209		else
2210			down_read(&OCFS2_I(inode)->ip_alloc_sem);
2211	} else {
2212		if (write_sem)
2213			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2214		else
2215			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2216
2217		if (!ret) {
2218			ret = -EAGAIN;
2219			goto out_unlock;
2220		}
2221	}
2222
2223	return ret;
2224
2225out_unlock:
2226	brelse(*di_bh);
2227	*di_bh = NULL;
2228	ocfs2_inode_unlock(inode, meta_level);
2229out:
2230	return ret;
2231}
2232
2233static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2234					       struct buffer_head **di_bh,
2235					       int meta_level,
2236					       int write_sem)
2237{
2238	if (write_sem)
2239		up_write(&OCFS2_I(inode)->ip_alloc_sem);
2240	else
2241		up_read(&OCFS2_I(inode)->ip_alloc_sem);
2242
2243	brelse(*di_bh);
2244	*di_bh = NULL;
2245
2246	if (meta_level >= 0)
2247		ocfs2_inode_unlock(inode, meta_level);
2248}
2249
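/*
 * Called before a write. Under the cluster inode lock this checks
 * IOCB_NOWAIT overwrites, clears suid/sgid when required, and CoWs any
 * refcounted (reflinked) clusters covered by the write, upgrading to an
 * exclusive meta lock where modifications are needed.
 */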
2250static int ocfs2_prepare_inode_for_write(struct file *file,
2251					 loff_t pos, size_t count, int wait)
2252{
2253	int ret = 0, meta_level = 0, overwrite_io = 0;
2254	int write_sem = 0;
2255	struct dentry *dentry = file->f_path.dentry;
2256	struct inode *inode = d_inode(dentry);
2257	struct buffer_head *di_bh = NULL;
2258	u32 cpos;
2259	u32 clusters;
2260
2261	/*
2262	 * We start with a read level meta lock and only jump to an ex
2263	 * if we need to make modifications here.
2264	 */
2265	for(;;) {
2266		ret = ocfs2_inode_lock_for_extent_tree(inode,
2267						       &di_bh,
2268						       meta_level,
2269						       write_sem,
2270						       wait);
2271		if (ret < 0) {
2272			if (ret != -EAGAIN)
2273				mlog_errno(ret);
2274			goto out;
2275		}
2276
2277		/*
2278		 * Check if IO will overwrite allocated blocks in case
2279		 * IOCB_NOWAIT flag is set.
2280		 */
2281		if (!wait && !overwrite_io) {
2282			overwrite_io = 1;
2283
2284			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2285			if (ret < 0) {
2286				if (ret != -EAGAIN)
2287					mlog_errno(ret);
2288				goto out_unlock;
2289			}
2290		}
2291
2292		/* Clear suid / sgid if necessary. We do this here
2293		 * instead of later in the write path because
2294		 * remove_suid() calls ->setattr without any hint that
2295		 * we may have already done our cluster locking. Since
2296		 * ocfs2_setattr() *must* take cluster locks to
2297		 * proceed, this will lead us to recursively lock the
2298		 * inode. There's also the dinode i_size state which
2299		 * can be lost via setattr during extending writes (we
2300		 * set inode->i_size at the end of a write). */
2301		if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) {
2302			if (meta_level == 0) {
2303				ocfs2_inode_unlock_for_extent_tree(inode,
2304								   &di_bh,
2305								   meta_level,
2306								   write_sem);
2307				meta_level = 1;
2308				continue;
2309			}
2310
2311			ret = ocfs2_write_remove_suid(inode);
2312			if (ret < 0) {
2313				mlog_errno(ret);
2314				goto out_unlock;
2315			}
2316		}
2317
2318		ret = ocfs2_check_range_for_refcount(inode, pos, count);
2319		if (ret == 1) {
2320			ocfs2_inode_unlock_for_extent_tree(inode,
2321							   &di_bh,
2322							   meta_level,
2323							   write_sem);
2324			meta_level = 1;
2325			write_sem = 1;
2326			ret = ocfs2_inode_lock_for_extent_tree(inode,
2327							       &di_bh,
2328							       meta_level,
2329							       write_sem,
2330							       wait);
2331			if (ret < 0) {
2332				if (ret != -EAGAIN)
2333					mlog_errno(ret);
2334				goto out;
2335			}
2336
2337			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2338			clusters =
2339				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2340			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2341		}
2342
2343		if (ret < 0) {
2344			if (ret != -EAGAIN)
2345				mlog_errno(ret);
2346			goto out_unlock;
2347		}
2348
2349		break;
2350	}
2351
2352out_unlock:
2353	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2354					    pos, count, wait);
2355
2356	ocfs2_inode_unlock_for_extent_tree(inode,
2357					   &di_bh,
2358					   meta_level,
2359					   write_sem);
2360
2361out:
2362	return ret;
2363}
2364
2365static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2366				    struct iov_iter *from)
2367{
2368	int rw_level;
2369	ssize_t written = 0;
2370	ssize_t ret;
2371	size_t count = iov_iter_count(from);
2372	struct file *file = iocb->ki_filp;
2373	struct inode *inode = file_inode(file);
2374	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2375	int full_coherency = !(osb->s_mount_opt &
2376			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2377	void *saved_ki_complete = NULL;
2378	int append_write = ((iocb->ki_pos + count) >=
2379			i_size_read(inode) ? 1 : 0);
2380	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2381	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2382
2383	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2384		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2385		file->f_path.dentry->d_name.len,
2386		file->f_path.dentry->d_name.name,
2387		(unsigned int)from->nr_segs);	/* GRRRRR */
2388
2389	if (!direct_io && nowait)
2390		return -EOPNOTSUPP;
2391
2392	if (count == 0)
2393		return 0;
2394
2395	if (nowait) {
2396		if (!inode_trylock(inode))
2397			return -EAGAIN;
2398	} else
2399		inode_lock(inode);
2400
2401	ocfs2_iocb_init_rw_locked(iocb);
2402
2403	/*
2404	 * Concurrent O_DIRECT writes are allowed with
2405	 * the mount option "coherency=buffered".
2406	 * For append writes, we must take rw EX.
2407	 */
2408	rw_level = (!direct_io || full_coherency || append_write);
2409
2410	if (nowait)
2411		ret = ocfs2_try_rw_lock(inode, rw_level);
2412	else
2413		ret = ocfs2_rw_lock(inode, rw_level);
2414	if (ret < 0) {
2415		if (ret != -EAGAIN)
2416			mlog_errno(ret);
2417		goto out_mutex;
2418	}
2419
2420	/*
2421	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2422	 * inode_lock to guarantee coherency.
2423	 */
2424	if (direct_io && full_coherency) {
2425		/*
2426		 * We need to take and drop the inode lock to force
2427		 * other nodes to drop their caches.  Buffered I/O
2428		 * already does this in write_begin().
2429		 */
2430		if (nowait)
2431			ret = ocfs2_try_inode_lock(inode, NULL, 1);
2432		else
2433			ret = ocfs2_inode_lock(inode, NULL, 1);
2434		if (ret < 0) {
2435			if (ret != -EAGAIN)
2436				mlog_errno(ret);
2437			goto out;
2438		}
2439
2440		ocfs2_inode_unlock(inode, 1);
2441	}
2442
2443	ret = generic_write_checks(iocb, from);
2444	if (ret <= 0) {
2445		if (ret)
2446			mlog_errno(ret);
2447		goto out;
2448	}
2449	count = ret;
2450
2451	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2452	if (ret < 0) {
2453		if (ret != -EAGAIN)
2454			mlog_errno(ret);
2455		goto out;
2456	}
2457
2458	if (direct_io && !is_sync_kiocb(iocb) &&
2459	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2460		/*
2461		 * Make it a sync io if it's an unaligned aio.
2462		 */
2463		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2464	}
2465
2466	/* communicate with ocfs2_dio_end_io */
2467	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2468
2469	written = __generic_file_write_iter(iocb, from);
2470	/* buffered aio wouldn't have proper lock coverage today */
2471	BUG_ON(written == -EIOCBQUEUED && !direct_io);
2472
2473	/*
2474	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2475	 * function pointer which is called when o_direct io completes so that
2476	 * it can unlock our rw lock.
2477	 * Unfortunately there are error cases which call end_io and others
2478	 * that don't.  So we don't have to unlock the rw_lock if either an
2479	 * async dio is going to do it in the future or an end_io after an
2480	 * error has already done it.
2481	 */
2482	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2483		rw_level = -1;
2484	}
2485
2486	if (unlikely(written <= 0))
2487		goto out;
2488
2489	if (((file->f_flags & O_DSYNC) && !direct_io) ||
2490	    IS_SYNC(inode)) {
2491		ret = filemap_fdatawrite_range(file->f_mapping,
2492					       iocb->ki_pos - written,
2493					       iocb->ki_pos - 1);
2494		if (ret < 0)
2495			written = ret;
2496
2497		if (!ret) {
2498			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2499			if (ret < 0)
2500				written = ret;
2501		}
2502
2503		if (!ret)
2504			ret = filemap_fdatawait_range(file->f_mapping,
2505						      iocb->ki_pos - written,
2506						      iocb->ki_pos - 1);
2507	}
2508
2509out:
2510	if (saved_ki_complete)
2511		xchg(&iocb->ki_complete, saved_ki_complete);
2512
2513	if (rw_level != -1)
2514		ocfs2_rw_unlock(inode, rw_level);
2515
2516out_mutex:
2517	inode_unlock(inode);
2518
2519	if (written)
2520		ret = written;
2521	return ret;
2522}
2523
2524static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2525				   struct iov_iter *to)
2526{
2527	int ret = 0, rw_level = -1, lock_level = 0;
2528	struct file *filp = iocb->ki_filp;
2529	struct inode *inode = file_inode(filp);
2530	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2531	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2532
2533	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2534			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2535			filp->f_path.dentry->d_name.len,
2536			filp->f_path.dentry->d_name.name,
2537			to->nr_segs);	/* GRRRRR */
2538
2539
2540	if (!inode) {
2541		ret = -EINVAL;
2542		mlog_errno(ret);
2543		goto bail;
2544	}
2545
2546	if (!direct_io && nowait)
2547		return -EOPNOTSUPP;
2548
2549	ocfs2_iocb_init_rw_locked(iocb);
2550
2551	/*
2552	 * buffered reads protect themselves in ->read_folio().  O_DIRECT reads
2553	 * need locks to protect pending reads from racing with truncate.
2554	 */
2555	if (direct_io) {
2556		if (nowait)
2557			ret = ocfs2_try_rw_lock(inode, 0);
2558		else
2559			ret = ocfs2_rw_lock(inode, 0);
2560
2561		if (ret < 0) {
2562			if (ret != -EAGAIN)
2563				mlog_errno(ret);
2564			goto bail;
2565		}
2566		rw_level = 0;
2567		/* communicate with ocfs2_dio_end_io */
2568		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2569	}
2570
2571	/*
2572	 * We're fine letting folks race truncates and extending
2573	 * writes with read across the cluster, just like they can
2574	 * locally. Hence no rw_lock during read.
2575	 *
2576	 * Take and drop the meta data lock to update inode fields
2577	 * like i_size. This gives the checks further down in
2578	 * copy_splice_read() a chance of actually working.
2579	 */
2580	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2581				     !nowait);
2582	if (ret < 0) {
2583		if (ret != -EAGAIN)
2584			mlog_errno(ret);
2585		goto bail;
2586	}
2587	ocfs2_inode_unlock(inode, lock_level);
2588
2589	ret = generic_file_read_iter(iocb, to);
2590	trace_generic_file_read_iter_ret(ret);
2591
2592	/* buffered aio wouldn't have proper lock coverage today */
2593	BUG_ON(ret == -EIOCBQUEUED && !direct_io);
2594
2595	/* see ocfs2_file_write_iter */
2596	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2597		rw_level = -1;
2598	}
2599
2600bail:
2601	if (rw_level != -1)
2602		ocfs2_rw_unlock(inode, rw_level);
2603
2604	return ret;
2605}
2606
2607static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos,
2608				      struct pipe_inode_info *pipe,
2609				      size_t len, unsigned int flags)
2610{
2611	struct inode *inode = file_inode(in);
2612	ssize_t ret = 0;
2613	int lock_level = 0;
2614
2615	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2616				     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2617				     in->f_path.dentry->d_name.len,
2618				     in->f_path.dentry->d_name.name,
2619				     flags);
2620
2621	/*
2622	 * We're fine letting folks race truncates and extending writes with
2623	 * read across the cluster, just like they can locally.  Hence no
2624	 * rw_lock during read.
2625	 *
2626	 * Take and drop the meta data lock to update inode fields like i_size.
2627	 * This gives the checks further down in filemap_splice_read() a chance
2628	 * of actually working.
2629	 */
2630	ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1);
2631	if (ret < 0) {
2632		if (ret != -EAGAIN)
2633			mlog_errno(ret);
2634		goto bail;
2635	}
2636	ocfs2_inode_unlock(inode, lock_level);
2637
2638	ret = filemap_splice_read(in, ppos, pipe, len, flags);
2639	trace_filemap_splice_read_ret(ret);
2640bail:
2641	return ret;
2642}
2643
2644/* Refer generic_file_llseek_unlocked() */
2645static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2646{
2647	struct inode *inode = file->f_mapping->host;
2648	int ret = 0;
2649
2650	inode_lock(inode);
2651
2652	switch (whence) {
2653	case SEEK_SET:
2654		break;
2655	case SEEK_END:
2656		/* SEEK_END requires the OCFS2 inode lock for the file
2657		 * because it references the file's size.
2658		 */
2659		ret = ocfs2_inode_lock(inode, NULL, 0);
2660		if (ret < 0) {
2661			mlog_errno(ret);
2662			goto out;
2663		}
2664		offset += i_size_read(inode);
2665		ocfs2_inode_unlock(inode, 0);
2666		break;
2667	case SEEK_CUR:
2668		if (offset == 0) {
2669			offset = file->f_pos;
2670			goto out;
2671		}
2672		offset += file->f_pos;
2673		break;
2674	case SEEK_DATA:
2675	case SEEK_HOLE:
2676		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2677		if (ret)
2678			goto out;
2679		break;
2680	default:
2681		ret = -EINVAL;
2682		goto out;
2683	}
2684
2685	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2686
2687out:
2688	inode_unlock(inode);
2689	if (ret)
2690		return ret;
2691	return offset;
2692}
2693
2694static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
2695				     struct file *file_out, loff_t pos_out,
2696				     loff_t len, unsigned int remap_flags)
2697{
2698	struct inode *inode_in = file_inode(file_in);
2699	struct inode *inode_out = file_inode(file_out);
2700	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
2701	struct buffer_head *in_bh = NULL, *out_bh = NULL;
2702	bool same_inode = (inode_in == inode_out);
2703	loff_t remapped = 0;
2704	ssize_t ret;
2705
2706	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2707		return -EINVAL;
2708	if (!ocfs2_refcount_tree(osb))
2709		return -EOPNOTSUPP;
2710	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
2711		return -EROFS;
2712
2713	/* Lock both files against IO */
2714	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
2715	if (ret)
2716		return ret;
2717
2718	/* Check file eligibility and prepare for block sharing. */
2719	ret = -EINVAL;
2720	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
2721	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
2722		goto out_unlock;
2723
2724	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
2725			&len, remap_flags);
2726	if (ret < 0 || len == 0)
2727		goto out_unlock;
2728
2729	/* Lock out changes to the allocation maps and remap. */
2730	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2731	if (!same_inode)
2732		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
2733				  SINGLE_DEPTH_NESTING);
2734
2735	/* Zap any page cache for the destination file's range. */
2736	truncate_inode_pages_range(&inode_out->i_data,
2737				   round_down(pos_out, PAGE_SIZE),
2738				   round_up(pos_out + len, PAGE_SIZE) - 1);
2739
2740	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
2741			inode_out, out_bh, pos_out, len);
2742	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2743	if (!same_inode)
2744		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
2745	if (remapped < 0) {
2746		ret = remapped;
2747		mlog_errno(ret);
2748		goto out_unlock;
2749	}
2750
2751	/*
2752	 * Empty the extent map so that we may get the right extent
2753	 * record from the disk.
2754	 */
2755	ocfs2_extent_map_trunc(inode_in, 0);
2756	ocfs2_extent_map_trunc(inode_out, 0);
2757
2758	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
2759	if (ret) {
2760		mlog_errno(ret);
2761		goto out_unlock;
2762	}
2763
2764out_unlock:
2765	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
2766	return remapped > 0 ? remapped : ret;
2767}
2768
2769static loff_t ocfs2_dir_llseek(struct file *file, loff_t offset, int whence)
2770{
2771	struct ocfs2_file_private *fp = file->private_data;
2772
2773	return generic_llseek_cookie(file, offset, whence, &fp->cookie);
2774}
2775
2776const struct inode_operations ocfs2_file_iops = {
2777	.setattr	= ocfs2_setattr,
2778	.getattr	= ocfs2_getattr,
2779	.permission	= ocfs2_permission,
2780	.listxattr	= ocfs2_listxattr,
2781	.fiemap		= ocfs2_fiemap,
2782	.get_inode_acl	= ocfs2_iop_get_acl,
2783	.set_acl	= ocfs2_iop_set_acl,
2784	.fileattr_get	= ocfs2_fileattr_get,
2785	.fileattr_set	= ocfs2_fileattr_set,
2786};
2787
2788const struct inode_operations ocfs2_special_file_iops = {
2789	.setattr	= ocfs2_setattr,
2790	.getattr	= ocfs2_getattr,
2791	.listxattr	= ocfs2_listxattr,
2792	.permission	= ocfs2_permission,
2793	.get_inode_acl	= ocfs2_iop_get_acl,
2794	.set_acl	= ocfs2_iop_set_acl,
2795};
2796
2797/*
2798 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2799 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2800 */
2801const struct file_operations ocfs2_fops = {
2802	.llseek		= ocfs2_file_llseek,
2803	.mmap		= ocfs2_mmap,
2804	.fsync		= ocfs2_sync_file,
2805	.release	= ocfs2_file_release,
2806	.open		= ocfs2_file_open,
2807	.read_iter	= ocfs2_file_read_iter,
2808	.write_iter	= ocfs2_file_write_iter,
2809	.unlocked_ioctl	= ocfs2_ioctl,
2810#ifdef CONFIG_COMPAT
2811	.compat_ioctl   = ocfs2_compat_ioctl,
2812#endif
2813	.lock		= ocfs2_lock,
2814	.flock		= ocfs2_flock,
2815	.splice_read	= ocfs2_file_splice_read,
2816	.splice_write	= iter_file_splice_write,
2817	.fallocate	= ocfs2_fallocate,
2818	.remap_file_range = ocfs2_remap_file_range,
2819	.fop_flags	= FOP_ASYNC_LOCK,
2820};
2821
2822WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
2823const struct file_operations ocfs2_dops = {
2824	.llseek		= ocfs2_dir_llseek,
2825	.read		= generic_read_dir,
2826	.iterate_shared	= shared_ocfs2_readdir,
2827	.fsync		= ocfs2_sync_file,
2828	.release	= ocfs2_dir_release,
2829	.open		= ocfs2_dir_open,
2830	.unlocked_ioctl	= ocfs2_ioctl,
2831#ifdef CONFIG_COMPAT
2832	.compat_ioctl   = ocfs2_compat_ioctl,
2833#endif
2834	.lock		= ocfs2_lock,
2835	.flock		= ocfs2_flock,
2836	.fop_flags	= FOP_ASYNC_LOCK,
2837};
2838
2839/*
2840 * POSIX-lockless variants of our file_operations.
2841 *
2842 * These will be used if the underlying cluster stack does not support
2843 * posix file locking, if the user passes the "localflocks" mount
2844 * option, or if we have a local-only fs.
2845 *
2846 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2847 * so we still want it in the case of no stack support for
2848 * plocks. Internally, it will do the right thing when asked to ignore
2849 * the cluster.
2850 */
2851const struct file_operations ocfs2_fops_no_plocks = {
2852	.llseek		= ocfs2_file_llseek,
2853	.mmap		= ocfs2_mmap,
2854	.fsync		= ocfs2_sync_file,
2855	.release	= ocfs2_file_release,
2856	.open		= ocfs2_file_open,
2857	.read_iter	= ocfs2_file_read_iter,
2858	.write_iter	= ocfs2_file_write_iter,
2859	.unlocked_ioctl	= ocfs2_ioctl,
2860#ifdef CONFIG_COMPAT
2861	.compat_ioctl   = ocfs2_compat_ioctl,
2862#endif
2863	.flock		= ocfs2_flock,
2864	.splice_read	= filemap_splice_read,
2865	.splice_write	= iter_file_splice_write,
2866	.fallocate	= ocfs2_fallocate,
2867	.remap_file_range = ocfs2_remap_file_range,
2868};
2869
2870const struct file_operations ocfs2_dops_no_plocks = {
2871	.llseek		= ocfs2_dir_llseek,
2872	.read		= generic_read_dir,
2873	.iterate_shared	= shared_ocfs2_readdir,
2874	.fsync		= ocfs2_sync_file,
2875	.release	= ocfs2_dir_release,
2876	.open		= ocfs2_dir_open,
2877	.unlocked_ioctl	= ocfs2_ioctl,
2878#ifdef CONFIG_COMPAT
2879	.compat_ioctl   = ocfs2_compat_ioctl,
2880#endif
2881	.flock		= ocfs2_flock,
2882};