   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * file.c
   5 *
   6 * File open, close, extend, truncate
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public
  12 * License as published by the Free Software Foundation; either
  13 * version 2 of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public
  21 * License along with this program; if not, write to the
  22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23 * Boston, MA 02111-1307, USA.
  24 */
  25
  26#include <linux/capability.h>
  27#include <linux/fs.h>
  28#include <linux/types.h>
  29#include <linux/slab.h>
  30#include <linux/highmem.h>
  31#include <linux/pagemap.h>
  32#include <linux/uio.h>
  33#include <linux/sched.h>
  34#include <linux/splice.h>
  35#include <linux/mount.h>
  36#include <linux/writeback.h>
  37#include <linux/falloc.h>
  38#include <linux/quotaops.h>
  39#include <linux/blkdev.h>
  40
  41#include <cluster/masklog.h>
  42
  43#include "ocfs2.h"
  44
  45#include "alloc.h"
  46#include "aops.h"
  47#include "dir.h"
  48#include "dlmglue.h"
  49#include "extent_map.h"
  50#include "file.h"
  51#include "sysfile.h"
  52#include "inode.h"
  53#include "ioctl.h"
  54#include "journal.h"
  55#include "locks.h"
  56#include "mmap.h"
  57#include "suballoc.h"
  58#include "super.h"
  59#include "xattr.h"
  60#include "acl.h"
  61#include "quota.h"
  62#include "refcounttree.h"
  63#include "ocfs2_trace.h"
  64
  65#include "buffer_head_io.h"
  66
  67static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  68{
  69	struct ocfs2_file_private *fp;
  70
  71	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  72	if (!fp)
  73		return -ENOMEM;
  74
  75	fp->fp_file = file;
  76	mutex_init(&fp->fp_mutex);
  77	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  78	file->private_data = fp;
  79
  80	return 0;
  81}
  82
  83static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  84{
  85	struct ocfs2_file_private *fp = file->private_data;
  86	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  87
  88	if (fp) {
  89		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  90		ocfs2_lock_res_free(&fp->fp_flock);
  91		kfree(fp);
  92		file->private_data = NULL;
  93	}
  94}
  95
  96static int ocfs2_file_open(struct inode *inode, struct file *file)
  97{
  98	int status;
  99	int mode = file->f_flags;
 100	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 101
 102	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
 103			      (unsigned long long)OCFS2_I(inode)->ip_blkno,
 104			      file->f_path.dentry->d_name.len,
 105			      file->f_path.dentry->d_name.name, mode);
 106
 107	if (file->f_mode & FMODE_WRITE)
 108		dquot_initialize(inode);
 109
 110	spin_lock(&oi->ip_lock);
 111
 112	/* Check that the inode hasn't been wiped from disk by another
 113	 * node. If it hasn't then we're safe as long as we hold the
 114	 * spin lock until our increment of open count. */
 115	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
 116		spin_unlock(&oi->ip_lock);
 117
 118		status = -ENOENT;
 119		goto leave;
 120	}
 121
 122	if (mode & O_DIRECT)
 123		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 124
 125	oi->ip_open_count++;
 126	spin_unlock(&oi->ip_lock);
 127
 128	status = ocfs2_init_file_private(inode, file);
 129	if (status) {
 130		/*
 131		 * We want to set open count back if we're failing the
 132		 * open.
 133		 */
 134		spin_lock(&oi->ip_lock);
 135		oi->ip_open_count--;
 136		spin_unlock(&oi->ip_lock);
 137	}
 138
 139leave:
 140	return status;
 141}
 142
 143static int ocfs2_file_release(struct inode *inode, struct file *file)
 144{
 145	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 146
 147	spin_lock(&oi->ip_lock);
 148	if (!--oi->ip_open_count)
 149		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 150
 151	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 152				 oi->ip_blkno,
 153				 file->f_path.dentry->d_name.len,
 154				 file->f_path.dentry->d_name.name,
 155				 oi->ip_open_count);
 156	spin_unlock(&oi->ip_lock);
 157
 158	ocfs2_free_file_private(inode, file);
 159
 160	return 0;
 161}
 162
 163static int ocfs2_dir_open(struct inode *inode, struct file *file)
 164{
 165	return ocfs2_init_file_private(inode, file);
 166}
 167
 168static int ocfs2_dir_release(struct inode *inode, struct file *file)
 169{
 170	ocfs2_free_file_private(inode, file);
 171	return 0;
 172}
 173
 174static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 175			   int datasync)
 176{
 177	int err = 0;
 178	journal_t *journal;
 179	struct inode *inode = file->f_mapping->host;
 180	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 181
 182	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 183			      OCFS2_I(inode)->ip_blkno,
 184			      file->f_path.dentry->d_name.len,
 185			      file->f_path.dentry->d_name.name,
 186			      (unsigned long long)datasync);
 187
 188	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 189	if (err)
 190		return err;
 191
 192	/*
 193	 * Probably don't need the i_mutex at all in here, just putting it here
 194	 * to be consistent with how fsync used to be called, someone more
 195	 * familiar with the fs could possibly remove it.
 196	 */
 197	mutex_lock(&inode->i_mutex);
 198	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
 199		/*
 200		 * We still have to flush drive's caches to get data to the
 201		 * platter
 202		 */
 203		if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
 204			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 205		goto bail;
 206	}
 207
 208	journal = osb->journal->j_journal;
 209	err = jbd2_journal_force_commit(journal);
 210
 211bail:
 212	if (err)
 213		mlog_errno(err);
 214	mutex_unlock(&inode->i_mutex);
 215
 216	return (err < 0) ? -EIO : 0;
 217}
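/*
 * Illustrative note (not part of the original source): the VFS calls
 * ->fsync() with datasync == 1 for fdatasync(2) and 0 for fsync(2).
 * A rough sketch of the fast path above:
 *
 *	fdatasync(fd);	datasync == 1, no I_DIRTY_DATASYNC metadata
 *			-> skip jbd2_journal_force_commit(); only issue a
 *			   block-device cache flush if the barrier mount
 *			   option is enabled
 *	fsync(fd);	datasync == 0
 *			-> always force a journal commit
 */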
 218
 219int ocfs2_should_update_atime(struct inode *inode,
 220			      struct vfsmount *vfsmnt)
 221{
 222	struct timespec now;
 223	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 224
 225	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 226		return 0;
 227
 228	if ((inode->i_flags & S_NOATIME) ||
 229	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
 230		return 0;
 231
 232	/*
 233	 * We can be called with no vfsmnt structure - NFSD will
 234	 * sometimes do this.
 235	 *
 236	 * Note that our action here is different than touch_atime() -
 237	 * if we can't tell whether this is a noatime mount, then we
 238	 * don't know whether to trust the value of s_atime_quantum.
 239	 */
 240	if (vfsmnt == NULL)
 241		return 0;
 242
 243	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 244	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 245		return 0;
 246
 247	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 248		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
 249		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
 250			return 1;
 251
 252		return 0;
 253	}
 254
 255	now = CURRENT_TIME;
 256	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
 257		return 0;
 258	else
 259		return 1;
 260}
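/*
 * Illustrative note (not part of the original source): on a relatime
 * mount the atime is refreshed only while it is older than mtime or
 * ctime; otherwise updates are rate limited by s_atime_quantum (set by
 * the "atime_quantum" mount option, in seconds).  For example, with
 * atime_quantum=60 two reads of the same file within a minute cause at
 * most one on-disk atime update.
 */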
 261
 262int ocfs2_update_inode_atime(struct inode *inode,
 263			     struct buffer_head *bh)
 264{
 265	int ret;
 266	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 267	handle_t *handle;
 268	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 269
 270	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 271	if (IS_ERR(handle)) {
 272		ret = PTR_ERR(handle);
 273		mlog_errno(ret);
 274		goto out;
 275	}
 276
 277	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 278				      OCFS2_JOURNAL_ACCESS_WRITE);
 279	if (ret) {
 280		mlog_errno(ret);
 281		goto out_commit;
 282	}
 283
 284	/*
 285	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 286	 * have i_mutex to guard against concurrent changes to other
 287	 * inode fields.
 288	 */
 289	inode->i_atime = CURRENT_TIME;
 290	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
 291	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 292	ocfs2_journal_dirty(handle, bh);
 293
 294out_commit:
 295	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 296out:
 297	return ret;
 298}
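/*
 * Illustrative note (not part of the original source): the function above
 * follows the metadata-update pattern used throughout this file:
 *
 *	handle = ocfs2_start_trans(osb, credits);
 *	ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 *				OCFS2_JOURNAL_ACCESS_WRITE);
 *	... modify the struct ocfs2_dinode in bh->b_data ...
 *	ocfs2_journal_dirty(handle, bh);
 *	ocfs2_commit_trans(osb, handle);
 */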
 299
 300static int ocfs2_set_inode_size(handle_t *handle,
 301				struct inode *inode,
 302				struct buffer_head *fe_bh,
 303				u64 new_i_size)
 304{
 305	int status;
 306
 307	i_size_write(inode, new_i_size);
 308	inode->i_blocks = ocfs2_inode_sector_count(inode);
 309	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 310
 311	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 312	if (status < 0) {
 313		mlog_errno(status);
 314		goto bail;
 315	}
 316
 317bail:
 318	return status;
 319}
 320
 321int ocfs2_simple_size_update(struct inode *inode,
 322			     struct buffer_head *di_bh,
 323			     u64 new_i_size)
 324{
 325	int ret;
 326	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 327	handle_t *handle = NULL;
 328
 329	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 330	if (IS_ERR(handle)) {
 331		ret = PTR_ERR(handle);
 332		mlog_errno(ret);
 333		goto out;
 334	}
 335
 336	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 337				   new_i_size);
 338	if (ret < 0)
 339		mlog_errno(ret);
 340
 341	ocfs2_commit_trans(osb, handle);
 342out:
 343	return ret;
 344}
 345
 346static int ocfs2_cow_file_pos(struct inode *inode,
 347			      struct buffer_head *fe_bh,
 348			      u64 offset)
 349{
 350	int status;
 351	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 352	unsigned int num_clusters = 0;
 353	unsigned int ext_flags = 0;
 354
 355	/*
  356	 * If the new offset is aligned to a cluster boundary, there is
 357	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 358	 * CoW either.
 359	 */
 360	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 361		return 0;
 362
 363	status = ocfs2_get_clusters(inode, cpos, &phys,
 364				    &num_clusters, &ext_flags);
 365	if (status) {
 366		mlog_errno(status);
 367		goto out;
 368	}
 369
 370	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 371		goto out;
 372
 373	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
 374
 375out:
 376	return status;
 377}
 378
 379static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 380				     struct inode *inode,
 381				     struct buffer_head *fe_bh,
 382				     u64 new_i_size)
 383{
 384	int status;
 385	handle_t *handle;
 386	struct ocfs2_dinode *di;
 387	u64 cluster_bytes;
 388
 389	/*
  390	 * We need to CoW the cluster that contains the offset if it is reflinked
 391	 * since we will call ocfs2_zero_range_for_truncate later which will
 392	 * write "0" from offset to the end of the cluster.
 393	 */
 394	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 395	if (status) {
 396		mlog_errno(status);
 397		return status;
 398	}
 399
 400	/* TODO: This needs to actually orphan the inode in this
 401	 * transaction. */
 402
 403	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 404	if (IS_ERR(handle)) {
 405		status = PTR_ERR(handle);
 406		mlog_errno(status);
 407		goto out;
 408	}
 409
 410	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 411					 OCFS2_JOURNAL_ACCESS_WRITE);
 412	if (status < 0) {
 413		mlog_errno(status);
 414		goto out_commit;
 415	}
 416
 417	/*
 418	 * Do this before setting i_size.
 419	 */
 420	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 421	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 422					       cluster_bytes);
 423	if (status) {
 424		mlog_errno(status);
 425		goto out_commit;
 426	}
 427
 428	i_size_write(inode, new_i_size);
 429	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 430
 431	di = (struct ocfs2_dinode *) fe_bh->b_data;
 432	di->i_size = cpu_to_le64(new_i_size);
 433	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
 434	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 435
 436	ocfs2_journal_dirty(handle, fe_bh);
 437
 438out_commit:
 439	ocfs2_commit_trans(osb, handle);
 440out:
 441	return status;
 442}
 443
 444static int ocfs2_truncate_file(struct inode *inode,
 445			       struct buffer_head *di_bh,
 446			       u64 new_i_size)
 447{
 448	int status = 0;
 449	struct ocfs2_dinode *fe = NULL;
 450	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 451
 452	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 453	 * already validated it */
 454	fe = (struct ocfs2_dinode *) di_bh->b_data;
 455
 456	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 457				  (unsigned long long)le64_to_cpu(fe->i_size),
 458				  (unsigned long long)new_i_size);
 459
 460	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 461			"Inode %llu, inode i_size = %lld != di "
 462			"i_size = %llu, i_flags = 0x%x\n",
 463			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 464			i_size_read(inode),
 465			(unsigned long long)le64_to_cpu(fe->i_size),
 466			le32_to_cpu(fe->i_flags));
 467
 468	if (new_i_size > le64_to_cpu(fe->i_size)) {
 469		trace_ocfs2_truncate_file_error(
 470			(unsigned long long)le64_to_cpu(fe->i_size),
 471			(unsigned long long)new_i_size);
 472		status = -EINVAL;
 473		mlog_errno(status);
 474		goto bail;
 475	}
 476
  477	/* let's handle the simple truncate cases before doing any more
 478	 * cluster locking. */
 479	if (new_i_size == le64_to_cpu(fe->i_size))
 480		goto bail;
 481
 482	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 483
 484	ocfs2_resv_discard(&osb->osb_la_resmap,
 485			   &OCFS2_I(inode)->ip_la_data_resv);
 486
 487	/*
 488	 * The inode lock forced other nodes to sync and drop their
 489	 * pages, which (correctly) happens even if we have a truncate
 490	 * without allocation change - ocfs2 cluster sizes can be much
 491	 * greater than page size, so we have to truncate them
 492	 * anyway.
 493	 */
 494	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 495	truncate_inode_pages(inode->i_mapping, new_i_size);
 496
 497	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 498		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 499					       i_size_read(inode), 1);
 500		if (status)
 501			mlog_errno(status);
 502
 503		goto bail_unlock_sem;
 504	}
 505
 506	/* alright, we're going to need to do a full blown alloc size
 507	 * change. Orphan the inode so that recovery can complete the
 508	 * truncate if necessary. This does the task of marking
 509	 * i_size. */
 510	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 511	if (status < 0) {
 512		mlog_errno(status);
 513		goto bail_unlock_sem;
 514	}
 515
 516	status = ocfs2_commit_truncate(osb, inode, di_bh);
 517	if (status < 0) {
 518		mlog_errno(status);
 519		goto bail_unlock_sem;
 520	}
 521
 522	/* TODO: orphan dir cleanup here. */
 523bail_unlock_sem:
 524	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 525
 526bail:
 527	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 528		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 529
 530	return status;
 531}
 532
 533/*
 534 * extend file allocation only here.
 535 * we'll update all the disk stuff, and oip->alloc_size
 536 *
 537 * expect stuff to be locked, a transaction started and enough data /
 538 * metadata reservations in the contexts.
 539 *
 540 * Will return -EAGAIN, and a reason if a restart is needed.
 541 * If passed in, *reason will always be set, even in error.
 542 */
 543int ocfs2_add_inode_data(struct ocfs2_super *osb,
 544			 struct inode *inode,
 545			 u32 *logical_offset,
 546			 u32 clusters_to_add,
 547			 int mark_unwritten,
 548			 struct buffer_head *fe_bh,
 549			 handle_t *handle,
 550			 struct ocfs2_alloc_context *data_ac,
 551			 struct ocfs2_alloc_context *meta_ac,
 552			 enum ocfs2_alloc_restarted *reason_ret)
 553{
 554	int ret;
 555	struct ocfs2_extent_tree et;
 556
 557	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 558	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 559					  clusters_to_add, mark_unwritten,
 560					  data_ac, meta_ac, reason_ret);
 561
 562	return ret;
 563}
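/*
 * Illustrative caller sketch (an assumption, mirroring what
 * __ocfs2_extend_allocation() below actually does): a return of -EAGAIN
 * together with *reason_ret tells the caller how to restart.
 *
 *	enum ocfs2_alloc_restarted why = RESTART_NONE;
 *
 *	status = ocfs2_add_inode_data(osb, inode, &logical, want, 0, fe_bh,
 *				      handle, data_ac, meta_ac, &why);
 *	if (why == RESTART_TRANS)
 *		... extend the handle's credits and retry ...
 *	else if (why == RESTART_META)
 *		... commit, re-reserve metadata and start over ...
 */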
 564
 565static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 566				     u32 clusters_to_add, int mark_unwritten)
 567{
 568	int status = 0;
 569	int restart_func = 0;
 570	int credits;
 571	u32 prev_clusters;
 572	struct buffer_head *bh = NULL;
 573	struct ocfs2_dinode *fe = NULL;
 574	handle_t *handle = NULL;
 575	struct ocfs2_alloc_context *data_ac = NULL;
 576	struct ocfs2_alloc_context *meta_ac = NULL;
 577	enum ocfs2_alloc_restarted why;
 578	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 579	struct ocfs2_extent_tree et;
 580	int did_quota = 0;
 581
 582	/*
 583	 * This function only exists for file systems which don't
 584	 * support holes.
 585	 */
 586	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 587
 588	status = ocfs2_read_inode_block(inode, &bh);
 589	if (status < 0) {
 590		mlog_errno(status);
 591		goto leave;
 592	}
 593	fe = (struct ocfs2_dinode *) bh->b_data;
 594
 595restart_all:
 596	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 597
 598	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 599	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 600				       &data_ac, &meta_ac);
 601	if (status) {
 602		mlog_errno(status);
 603		goto leave;
 604	}
 605
 606	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
 607					    clusters_to_add);
 608	handle = ocfs2_start_trans(osb, credits);
 609	if (IS_ERR(handle)) {
 610		status = PTR_ERR(handle);
 611		handle = NULL;
 612		mlog_errno(status);
 613		goto leave;
 614	}
 615
 616restarted_transaction:
 617	trace_ocfs2_extend_allocation(
 618		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 619		(unsigned long long)i_size_read(inode),
 620		le32_to_cpu(fe->i_clusters), clusters_to_add,
 621		why, restart_func);
 622
 623	status = dquot_alloc_space_nodirty(inode,
 624			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 625	if (status)
 626		goto leave;
 627	did_quota = 1;
 628
  629	/* reserve a write to the file entry early on - that way if we
 630	 * run out of credits in the allocation path, we can still
 631	 * update i_size. */
 632	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 633					 OCFS2_JOURNAL_ACCESS_WRITE);
 634	if (status < 0) {
 635		mlog_errno(status);
 636		goto leave;
 637	}
 638
 639	prev_clusters = OCFS2_I(inode)->ip_clusters;
 640
 641	status = ocfs2_add_inode_data(osb,
 642				      inode,
 643				      &logical_start,
 644				      clusters_to_add,
 645				      mark_unwritten,
 646				      bh,
 647				      handle,
 648				      data_ac,
 649				      meta_ac,
 650				      &why);
 651	if ((status < 0) && (status != -EAGAIN)) {
 652		if (status != -ENOSPC)
 653			mlog_errno(status);
 654		goto leave;
 655	}
 656
 657	ocfs2_journal_dirty(handle, bh);
 658
 659	spin_lock(&OCFS2_I(inode)->ip_lock);
 660	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 661	spin_unlock(&OCFS2_I(inode)->ip_lock);
 662	/* Release unused quota reservation */
 663	dquot_free_space(inode,
 664			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 665	did_quota = 0;
 666
 667	if (why != RESTART_NONE && clusters_to_add) {
 668		if (why == RESTART_META) {
 669			restart_func = 1;
 670			status = 0;
 671		} else {
 672			BUG_ON(why != RESTART_TRANS);
 673
 674			/* TODO: This can be more intelligent. */
 675			credits = ocfs2_calc_extend_credits(osb->sb,
 676							    &fe->id2.i_list,
 677							    clusters_to_add);
 678			status = ocfs2_extend_trans(handle, credits);
 679			if (status < 0) {
 680				/* handle still has to be committed at
 681				 * this point. */
 682				status = -ENOMEM;
 683				mlog_errno(status);
 684				goto leave;
 685			}
 686			goto restarted_transaction;
 687		}
 688	}
 689
 690	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 691	     le32_to_cpu(fe->i_clusters),
 692	     (unsigned long long)le64_to_cpu(fe->i_size),
 693	     OCFS2_I(inode)->ip_clusters,
 694	     (unsigned long long)i_size_read(inode));
 695
 696leave:
 697	if (status < 0 && did_quota)
 698		dquot_free_space(inode,
 699			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 700	if (handle) {
 701		ocfs2_commit_trans(osb, handle);
 702		handle = NULL;
 703	}
 704	if (data_ac) {
 705		ocfs2_free_alloc_context(data_ac);
 706		data_ac = NULL;
 707	}
 708	if (meta_ac) {
 709		ocfs2_free_alloc_context(meta_ac);
 710		meta_ac = NULL;
 711	}
 712	if ((!status) && restart_func) {
 713		restart_func = 0;
 714		goto restart_all;
 715	}
 716	brelse(bh);
 717	bh = NULL;
 718
 719	return status;
 720}
 721
 722/*
 723 * While a write will already be ordering the data, a truncate will not.
 724 * Thus, we need to explicitly order the zeroed pages.
 725 */
 726static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
 727{
 728	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 729	handle_t *handle = NULL;
 730	int ret = 0;
 731
 732	if (!ocfs2_should_order_data(inode))
 733		goto out;
 734
 735	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 736	if (IS_ERR(handle)) {
 737		ret = -ENOMEM;
 738		mlog_errno(ret);
 739		goto out;
 740	}
 741
 742	ret = ocfs2_jbd2_file_inode(handle, inode);
 743	if (ret < 0)
 744		mlog_errno(ret);
 745
 746out:
 747	if (ret) {
 748		if (!IS_ERR(handle))
 749			ocfs2_commit_trans(osb, handle);
 750		handle = ERR_PTR(ret);
 751	}
 752	return handle;
 753}
 754
 755/* Some parts of this taken from generic_cont_expand, which turned out
 756 * to be too fragile to do exactly what we need without us having to
 757 * worry about recursive locking in ->write_begin() and ->write_end(). */
 758static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 759				 u64 abs_to)
 760{
 761	struct address_space *mapping = inode->i_mapping;
 762	struct page *page;
 763	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
 764	handle_t *handle = NULL;
 765	int ret = 0;
 766	unsigned zero_from, zero_to, block_start, block_end;
 767
 768	BUG_ON(abs_from >= abs_to);
 769	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
 770	BUG_ON(abs_from & (inode->i_blkbits - 1));
 771
 772	page = find_or_create_page(mapping, index, GFP_NOFS);
 773	if (!page) {
 774		ret = -ENOMEM;
 775		mlog_errno(ret);
 776		goto out;
 777	}
 778
 779	/* Get the offsets within the page that we want to zero */
 780	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
 781	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
 782	if (!zero_to)
 783		zero_to = PAGE_CACHE_SIZE;
 784
 785	trace_ocfs2_write_zero_page(
 786			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 787			(unsigned long long)abs_from,
 788			(unsigned long long)abs_to,
 789			index, zero_from, zero_to);
 790
 791	/* We know that zero_from is block aligned */
 792	for (block_start = zero_from; block_start < zero_to;
 793	     block_start = block_end) {
 794		block_end = block_start + (1 << inode->i_blkbits);
 795
 796		/*
 797		 * block_start is block-aligned.  Bump it by one to force
 798		 * __block_write_begin and block_commit_write to zero the
 799		 * whole block.
 800		 */
 801		ret = __block_write_begin(page, block_start + 1, 0,
 802					  ocfs2_get_block);
 803		if (ret < 0) {
 804			mlog_errno(ret);
 805			goto out_unlock;
 806		}
 807
 808		if (!handle) {
 809			handle = ocfs2_zero_start_ordered_transaction(inode);
 810			if (IS_ERR(handle)) {
 811				ret = PTR_ERR(handle);
 812				handle = NULL;
 813				break;
 814			}
 815		}
 816
 817		/* must not update i_size! */
 818		ret = block_commit_write(page, block_start + 1,
 819					 block_start + 1);
 820		if (ret < 0)
 821			mlog_errno(ret);
 822		else
 823			ret = 0;
 824	}
 825
 826	if (handle)
 827		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 828
 829out_unlock:
 830	unlock_page(page);
 831	page_cache_release(page);
 832out:
 833	return ret;
 834}
 835
 836/*
 837 * Find the next range to zero.  We do this in terms of bytes because
 838 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 839 * pagecache.  We may return multiple extents.
 840 *
  841 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 842 * needs to be zeroed.  range_start and range_end return the next zeroing
 843 * range.  A subsequent call should pass the previous range_end as its
 844 * zero_start.  If range_end is 0, there's nothing to do.
 845 *
 846 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 847 */
 848static int ocfs2_zero_extend_get_range(struct inode *inode,
 849				       struct buffer_head *di_bh,
 850				       u64 zero_start, u64 zero_end,
 851				       u64 *range_start, u64 *range_end)
 852{
 853	int rc = 0, needs_cow = 0;
 854	u32 p_cpos, zero_clusters = 0;
 855	u32 zero_cpos =
 856		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 857	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 858	unsigned int num_clusters = 0;
 859	unsigned int ext_flags = 0;
 860
 861	while (zero_cpos < last_cpos) {
 862		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 863					&num_clusters, &ext_flags);
 864		if (rc) {
 865			mlog_errno(rc);
 866			goto out;
 867		}
 868
 869		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 870			zero_clusters = num_clusters;
 871			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 872				needs_cow = 1;
 873			break;
 874		}
 875
 876		zero_cpos += num_clusters;
 877	}
 878	if (!zero_clusters) {
 879		*range_end = 0;
 880		goto out;
 881	}
 882
 883	while ((zero_cpos + zero_clusters) < last_cpos) {
 884		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 885					&p_cpos, &num_clusters,
 886					&ext_flags);
 887		if (rc) {
 888			mlog_errno(rc);
 889			goto out;
 890		}
 891
 892		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 893			break;
 894		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 895			needs_cow = 1;
 896		zero_clusters += num_clusters;
 897	}
 898	if ((zero_cpos + zero_clusters) > last_cpos)
 899		zero_clusters = last_cpos - zero_cpos;
 900
 901	if (needs_cow) {
 902		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
 903					zero_clusters, UINT_MAX);
 904		if (rc) {
 905			mlog_errno(rc);
 906			goto out;
 907		}
 908	}
 909
 910	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 911	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 912					     zero_cpos + zero_clusters);
 913
 914out:
 915	return rc;
 916}
 917
 918/*
 919 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 920 * has made sure that the entire range needs zeroing.
 921 */
 922static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 923				   u64 range_end)
 924{
 925	int rc = 0;
 926	u64 next_pos;
 927	u64 zero_pos = range_start;
 928
 929	trace_ocfs2_zero_extend_range(
 930			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 931			(unsigned long long)range_start,
 932			(unsigned long long)range_end);
 933	BUG_ON(range_start >= range_end);
 934
 935	while (zero_pos < range_end) {
 936		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
 937		if (next_pos > range_end)
 938			next_pos = range_end;
 939		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
 940		if (rc < 0) {
 941			mlog_errno(rc);
 942			break;
 943		}
 944		zero_pos = next_pos;
 945
 946		/*
 947		 * Very large extends have the potential to lock up
 948		 * the cpu for extended periods of time.
 949		 */
 950		cond_resched();
 951	}
 952
 953	return rc;
 954}
 955
 956int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 957		      loff_t zero_to_size)
 958{
 959	int ret = 0;
 960	u64 zero_start, range_start = 0, range_end = 0;
 961	struct super_block *sb = inode->i_sb;
 962
 963	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 964	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 965				(unsigned long long)zero_start,
 966				(unsigned long long)i_size_read(inode));
 967	while (zero_start < zero_to_size) {
 968		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 969						  zero_to_size,
 970						  &range_start,
 971						  &range_end);
 972		if (ret) {
 973			mlog_errno(ret);
 974			break;
 975		}
 976		if (!range_end)
 977			break;
 978		/* Trim the ends */
 979		if (range_start < zero_start)
 980			range_start = zero_start;
 981		if (range_end > zero_to_size)
 982			range_end = zero_to_size;
 983
 984		ret = ocfs2_zero_extend_range(inode, range_start,
 985					      range_end);
 986		if (ret) {
 987			mlog_errno(ret);
 988			break;
 989		}
 990		zero_start = range_end;
 991	}
 992
 993	return ret;
 994}
 995
 996int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
 997			  u64 new_i_size, u64 zero_to)
 998{
 999	int ret;
1000	u32 clusters_to_add;
1001	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1002
1003	/*
1004	 * Only quota files call this without a bh, and they can't be
1005	 * refcounted.
1006	 */
1007	BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
1008	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1009
1010	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1011	if (clusters_to_add < oi->ip_clusters)
1012		clusters_to_add = 0;
1013	else
1014		clusters_to_add -= oi->ip_clusters;
1015
1016	if (clusters_to_add) {
1017		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
1018						clusters_to_add, 0);
1019		if (ret) {
1020			mlog_errno(ret);
1021			goto out;
1022		}
1023	}
1024
1025	/*
1026	 * Call this even if we don't add any clusters to the tree. We
1027	 * still need to zero the area between the old i_size and the
1028	 * new i_size.
1029	 */
1030	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1031	if (ret < 0)
1032		mlog_errno(ret);
1033
1034out:
1035	return ret;
1036}
1037
1038static int ocfs2_extend_file(struct inode *inode,
1039			     struct buffer_head *di_bh,
1040			     u64 new_i_size)
1041{
1042	int ret = 0;
1043	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1044
1045	BUG_ON(!di_bh);
1046
1047	/* setattr sometimes calls us like this. */
1048	if (new_i_size == 0)
1049		goto out;
1050
1051	if (i_size_read(inode) == new_i_size)
1052		goto out;
1053	BUG_ON(new_i_size < i_size_read(inode));
1054
1055	/*
1056	 * The alloc sem blocks people in read/write from reading our
1057	 * allocation until we're done changing it. We depend on
1058	 * i_mutex to block other extend/truncate calls while we're
1059	 * here.  We even have to hold it for sparse files because there
1060	 * might be some tail zeroing.
1061	 */
1062	down_write(&oi->ip_alloc_sem);
1063
1064	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1065		/*
 1066		 * We can optimize small extends by keeping the inode's
1067		 * inline data.
1068		 */
1069		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1070			up_write(&oi->ip_alloc_sem);
1071			goto out_update_size;
1072		}
1073
1074		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1075		if (ret) {
1076			up_write(&oi->ip_alloc_sem);
1077			mlog_errno(ret);
1078			goto out;
1079		}
1080	}
1081
1082	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1083		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1084	else
1085		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1086					    new_i_size);
1087
1088	up_write(&oi->ip_alloc_sem);
1089
1090	if (ret < 0) {
1091		mlog_errno(ret);
1092		goto out;
1093	}
1094
1095out_update_size:
1096	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1097	if (ret < 0)
1098		mlog_errno(ret);
1099
1100out:
1101	return ret;
1102}
1103
1104int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1105{
1106	int status = 0, size_change;
1107	struct inode *inode = dentry->d_inode;
1108	struct super_block *sb = inode->i_sb;
1109	struct ocfs2_super *osb = OCFS2_SB(sb);
1110	struct buffer_head *bh = NULL;
1111	handle_t *handle = NULL;
1112	struct dquot *transfer_to[MAXQUOTAS] = { };
1113	int qtype;
1114
1115	trace_ocfs2_setattr(inode, dentry,
1116			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1117			    dentry->d_name.len, dentry->d_name.name,
1118			    attr->ia_valid, attr->ia_mode,
1119			    attr->ia_uid, attr->ia_gid);
1120
1121	/* ensuring we don't even attempt to truncate a symlink */
1122	if (S_ISLNK(inode->i_mode))
1123		attr->ia_valid &= ~ATTR_SIZE;
1124
1125#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1126			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1127	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1128		return 0;
1129
1130	status = inode_change_ok(inode, attr);
1131	if (status)
1132		return status;
1133
1134	if (is_quota_modification(inode, attr))
1135		dquot_initialize(inode);
1136	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1137	if (size_change) {
1138		status = ocfs2_rw_lock(inode, 1);
1139		if (status < 0) {
1140			mlog_errno(status);
1141			goto bail;
1142		}
1143	}
1144
1145	status = ocfs2_inode_lock(inode, &bh, 1);
1146	if (status < 0) {
1147		if (status != -ENOENT)
1148			mlog_errno(status);
1149		goto bail_unlock_rw;
1150	}
1151
1152	if (size_change && attr->ia_size != i_size_read(inode)) {
1153		status = inode_newsize_ok(inode, attr->ia_size);
1154		if (status)
1155			goto bail_unlock;
1156
1157		inode_dio_wait(inode);
1158
1159		if (i_size_read(inode) > attr->ia_size) {
1160			if (ocfs2_should_order_data(inode)) {
1161				status = ocfs2_begin_ordered_truncate(inode,
1162								      attr->ia_size);
1163				if (status)
1164					goto bail_unlock;
1165			}
1166			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1167		} else
1168			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1169		if (status < 0) {
1170			if (status != -ENOSPC)
1171				mlog_errno(status);
1172			status = -ENOSPC;
1173			goto bail_unlock;
1174		}
1175	}
1176
1177	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
1178	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
1179		/*
1180		 * Gather pointers to quota structures so that allocation /
1181		 * freeing of quota structures happens here and not inside
1182		 * dquot_transfer() where we have problems with lock ordering
1183		 */
1184		if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
1185		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1186		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1187			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
1188						      USRQUOTA);
1189			if (!transfer_to[USRQUOTA]) {
1190				status = -ESRCH;
1191				goto bail_unlock;
1192			}
1193		}
1194		if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
1195		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1196		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1197			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
1198						      GRPQUOTA);
1199			if (!transfer_to[GRPQUOTA]) {
1200				status = -ESRCH;
1201				goto bail_unlock;
1202			}
1203		}
1204		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1205					   2 * ocfs2_quota_trans_credits(sb));
1206		if (IS_ERR(handle)) {
1207			status = PTR_ERR(handle);
1208			mlog_errno(status);
1209			goto bail_unlock;
1210		}
1211		status = __dquot_transfer(inode, transfer_to);
1212		if (status < 0)
1213			goto bail_commit;
1214	} else {
1215		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1216		if (IS_ERR(handle)) {
1217			status = PTR_ERR(handle);
1218			mlog_errno(status);
1219			goto bail_unlock;
1220		}
1221	}
1222
1223	/*
1224	 * This will intentionally not wind up calling truncate_setsize(),
1225	 * since all the work for a size change has been done above.
1226	 * Otherwise, we could get into problems with truncate as
1227	 * ip_alloc_sem is used there to protect against i_size
1228	 * changes.
1229	 *
1230	 * XXX: this means the conditional below can probably be removed.
1231	 */
1232	if ((attr->ia_valid & ATTR_SIZE) &&
1233	    attr->ia_size != i_size_read(inode)) {
1234		status = vmtruncate(inode, attr->ia_size);
1235		if (status) {
1236			mlog_errno(status);
1237			goto bail_commit;
1238		}
1239	}
1240
1241	setattr_copy(inode, attr);
1242	mark_inode_dirty(inode);
1243
1244	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1245	if (status < 0)
1246		mlog_errno(status);
1247
1248bail_commit:
1249	ocfs2_commit_trans(osb, handle);
1250bail_unlock:
1251	ocfs2_inode_unlock(inode, 1);
1252bail_unlock_rw:
1253	if (size_change)
1254		ocfs2_rw_unlock(inode, 1);
1255bail:
1256	brelse(bh);
1257
1258	/* Release quota pointers in case we acquired them */
1259	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
1260		dqput(transfer_to[qtype]);
1261
1262	if (!status && attr->ia_valid & ATTR_MODE) {
1263		status = ocfs2_acl_chmod(inode);
1264		if (status < 0)
1265			mlog_errno(status);
1266	}
1267
1268	return status;
1269}
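/*
 * Illustrative note (not part of the original source): for a size change
 * the locking order in ocfs2_setattr() is rw lock -> cluster inode lock
 * -> journal transaction, and the bail_commit / bail_unlock /
 * bail_unlock_rw labels above unwind them in the reverse order.
 */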
1270
1271int ocfs2_getattr(struct vfsmount *mnt,
1272		  struct dentry *dentry,
1273		  struct kstat *stat)
1274{
1275	struct inode *inode = dentry->d_inode;
1276	struct super_block *sb = dentry->d_inode->i_sb;
1277	struct ocfs2_super *osb = sb->s_fs_info;
1278	int err;
1279
1280	err = ocfs2_inode_revalidate(dentry);
1281	if (err) {
1282		if (err != -ENOENT)
1283			mlog_errno(err);
1284		goto bail;
1285	}
1286
1287	generic_fillattr(inode, stat);
1288
1289	/* We set the blksize from the cluster size for performance */
1290	stat->blksize = osb->s_clustersize;
1291
1292bail:
1293	return err;
1294}
1295
1296int ocfs2_permission(struct inode *inode, int mask)
1297{
1298	int ret;
1299
1300	if (mask & MAY_NOT_BLOCK)
1301		return -ECHILD;
1302
1303	ret = ocfs2_inode_lock(inode, NULL, 0);
1304	if (ret) {
1305		if (ret != -ENOENT)
1306			mlog_errno(ret);
1307		goto out;
1308	}
1309
1310	ret = generic_permission(inode, mask);
1311
1312	ocfs2_inode_unlock(inode, 0);
1313out:
1314	return ret;
1315}
1316
1317static int __ocfs2_write_remove_suid(struct inode *inode,
1318				     struct buffer_head *bh)
1319{
1320	int ret;
1321	handle_t *handle;
1322	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1323	struct ocfs2_dinode *di;
1324
1325	trace_ocfs2_write_remove_suid(
1326			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1327			inode->i_mode);
1328
1329	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1330	if (IS_ERR(handle)) {
1331		ret = PTR_ERR(handle);
1332		mlog_errno(ret);
1333		goto out;
1334	}
1335
1336	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1337				      OCFS2_JOURNAL_ACCESS_WRITE);
1338	if (ret < 0) {
1339		mlog_errno(ret);
1340		goto out_trans;
1341	}
1342
1343	inode->i_mode &= ~S_ISUID;
1344	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1345		inode->i_mode &= ~S_ISGID;
1346
1347	di = (struct ocfs2_dinode *) bh->b_data;
1348	di->i_mode = cpu_to_le16(inode->i_mode);
1349
1350	ocfs2_journal_dirty(handle, bh);
1351
1352out_trans:
1353	ocfs2_commit_trans(osb, handle);
1354out:
1355	return ret;
1356}
1357
1358/*
1359 * Will look for holes and unwritten extents in the range starting at
1360 * pos for count bytes (inclusive).
1361 */
1362static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1363				       size_t count)
1364{
1365	int ret = 0;
1366	unsigned int extent_flags;
1367	u32 cpos, clusters, extent_len, phys_cpos;
1368	struct super_block *sb = inode->i_sb;
1369
1370	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1371	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1372
1373	while (clusters) {
1374		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1375					 &extent_flags);
1376		if (ret < 0) {
1377			mlog_errno(ret);
1378			goto out;
1379		}
1380
1381		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1382			ret = 1;
1383			break;
1384		}
1385
1386		if (extent_len > clusters)
1387			extent_len = clusters;
1388
1389		clusters -= extent_len;
1390		cpos += extent_len;
1391	}
1392out:
1393	return ret;
1394}
1395
1396static int ocfs2_write_remove_suid(struct inode *inode)
1397{
1398	int ret;
1399	struct buffer_head *bh = NULL;
1400
1401	ret = ocfs2_read_inode_block(inode, &bh);
1402	if (ret < 0) {
1403		mlog_errno(ret);
1404		goto out;
1405	}
1406
1407	ret =  __ocfs2_write_remove_suid(inode, bh);
1408out:
1409	brelse(bh);
1410	return ret;
1411}
1412
1413/*
1414 * Allocate enough extents to cover the region starting at byte offset
1415 * start for len bytes. Existing extents are skipped, any extents
1416 * added are marked as "unwritten".
1417 */
1418static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1419					    u64 start, u64 len)
1420{
1421	int ret;
1422	u32 cpos, phys_cpos, clusters, alloc_size;
1423	u64 end = start + len;
1424	struct buffer_head *di_bh = NULL;
1425
1426	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1427		ret = ocfs2_read_inode_block(inode, &di_bh);
1428		if (ret) {
1429			mlog_errno(ret);
1430			goto out;
1431		}
1432
1433		/*
1434		 * Nothing to do if the requested reservation range
1435		 * fits within the inode.
1436		 */
1437		if (ocfs2_size_fits_inline_data(di_bh, end))
1438			goto out;
1439
1440		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1441		if (ret) {
1442			mlog_errno(ret);
1443			goto out;
1444		}
1445	}
1446
1447	/*
1448	 * We consider both start and len to be inclusive.
1449	 */
1450	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1451	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1452	clusters -= cpos;
1453
1454	while (clusters) {
1455		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1456					 &alloc_size, NULL);
1457		if (ret) {
1458			mlog_errno(ret);
1459			goto out;
1460		}
1461
1462		/*
1463		 * Hole or existing extent len can be arbitrary, so
1464		 * cap it to our own allocation request.
1465		 */
1466		if (alloc_size > clusters)
1467			alloc_size = clusters;
1468
1469		if (phys_cpos) {
1470			/*
1471			 * We already have an allocation at this
1472			 * region so we can safely skip it.
1473			 */
1474			goto next;
1475		}
1476
1477		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1478		if (ret) {
1479			if (ret != -ENOSPC)
1480				mlog_errno(ret);
1481			goto out;
1482		}
1483
1484next:
1485		cpos += alloc_size;
1486		clusters -= alloc_size;
1487	}
1488
1489	ret = 0;
1490out:
1491
1492	brelse(di_bh);
1493	return ret;
1494}
1495
1496/*
1497 * Truncate a byte range, avoiding pages within partial clusters. This
1498 * preserves those pages for the zeroing code to write to.
1499 */
1500static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1501					 u64 byte_len)
1502{
1503	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1504	loff_t start, end;
1505	struct address_space *mapping = inode->i_mapping;
1506
1507	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1508	end = byte_start + byte_len;
1509	end = end & ~(osb->s_clustersize - 1);
1510
1511	if (start < end) {
1512		unmap_mapping_range(mapping, start, end - start, 0);
1513		truncate_inode_pages_range(mapping, start, end - 1);
1514	}
1515}
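/*
 * Worked example (illustrative, not part of the original source), assuming
 * a 64KB cluster size: for byte_start = 10000 and byte_len = 200000,
 * start rounds up to 65536 and end rounds down to 196608, so only pages
 * covering the whole clusters in [65536, 196608) are dropped; the partial
 * clusters at either edge keep their pages for the zeroing code.
 */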
1516
1517static int ocfs2_zero_partial_clusters(struct inode *inode,
1518				       u64 start, u64 len)
1519{
1520	int ret = 0;
1521	u64 tmpend, end = start + len;
1522	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1523	unsigned int csize = osb->s_clustersize;
1524	handle_t *handle;
1525
1526	/*
1527	 * The "start" and "end" values are NOT necessarily part of
1528	 * the range whose allocation is being deleted. Rather, this
1529	 * is what the user passed in with the request. We must zero
1530	 * partial clusters here. There's no need to worry about
1531	 * physical allocation - the zeroing code knows to skip holes.
1532	 */
1533	trace_ocfs2_zero_partial_clusters(
1534		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1535		(unsigned long long)start, (unsigned long long)end);
1536
1537	/*
1538	 * If both edges are on a cluster boundary then there's no
1539	 * zeroing required as the region is part of the allocation to
1540	 * be truncated.
1541	 */
1542	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1543		goto out;
1544
1545	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1546	if (IS_ERR(handle)) {
1547		ret = PTR_ERR(handle);
1548		mlog_errno(ret);
1549		goto out;
1550	}
1551
1552	/*
1553	 * We want to get the byte offset of the end of the 1st cluster.
1554	 */
1555	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1556	if (tmpend > end)
1557		tmpend = end;
1558
1559	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1560						 (unsigned long long)tmpend);
1561
1562	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1563	if (ret)
1564		mlog_errno(ret);
1565
1566	if (tmpend < end) {
1567		/*
1568		 * This may make start and end equal, but the zeroing
1569		 * code will skip any work in that case so there's no
1570		 * need to catch it up here.
1571		 */
1572		start = end & ~(osb->s_clustersize - 1);
1573
1574		trace_ocfs2_zero_partial_clusters_range2(
1575			(unsigned long long)start, (unsigned long long)end);
1576
1577		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1578		if (ret)
1579			mlog_errno(ret);
1580	}
1581
1582	ocfs2_commit_trans(osb, handle);
1583out:
1584	return ret;
1585}
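/*
 * Worked example (illustrative, not part of the original source), assuming
 * a 64KB cluster size: punching start = 10000, end = 300000 zeroes the
 * head range [10000, 65536) in the first cluster and the tail range
 * [262144, 300000) in the last cluster; the whole clusters in between are
 * removed from the allocation instead of being zeroed.
 */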
1586
1587static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1588{
1589	int i;
1590	struct ocfs2_extent_rec *rec = NULL;
1591
1592	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1593
1594		rec = &el->l_recs[i];
1595
1596		if (le32_to_cpu(rec->e_cpos) < pos)
1597			break;
1598	}
1599
1600	return i;
1601}
1602
1603/*
1604 * Helper to calculate the punching pos and length in one run, we handle the
1605 * following three cases in order:
1606 *
1607 * - remove the entire record
1608 * - remove a partial record
1609 * - no record needs to be removed (hole-punching completed)
1610*/
1611static void ocfs2_calc_trunc_pos(struct inode *inode,
1612				 struct ocfs2_extent_list *el,
1613				 struct ocfs2_extent_rec *rec,
1614				 u32 trunc_start, u32 *trunc_cpos,
1615				 u32 *trunc_len, u32 *trunc_end,
1616				 u64 *blkno, int *done)
1617{
1618	int ret = 0;
1619	u32 coff, range;
1620
1621	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1622
1623	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1624		/*
1625		 * remove an entire extent record.
1626		 */
1627		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1628		/*
1629		 * Skip holes if any.
1630		 */
1631		if (range < *trunc_end)
1632			*trunc_end = range;
1633		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1634		*blkno = le64_to_cpu(rec->e_blkno);
1635		*trunc_end = le32_to_cpu(rec->e_cpos);
1636	} else if (range > trunc_start) {
1637		/*
1638		 * remove a partial extent record, which means we're
1639		 * removing the last extent record.
1640		 */
1641		*trunc_cpos = trunc_start;
1642		/*
1643		 * skip hole if any.
1644		 */
1645		if (range < *trunc_end)
1646			*trunc_end = range;
1647		*trunc_len = *trunc_end - trunc_start;
1648		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1649		*blkno = le64_to_cpu(rec->e_blkno) +
1650				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1651		*trunc_end = trunc_start;
1652	} else {
1653		/*
 1654		 * There are two possibilities:
1655		 *
1656		 * - last record has been removed
1657		 * - trunc_start was within a hole
1658		 *
 1659		 * either case means the completion of hole punching.
1660		 */
1661		ret = 1;
1662	}
1663
1664	*done = ret;
1665}
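/*
 * Worked example (illustrative, not part of the original source): with
 * trunc_start = 80 and trunc_end = 150, a record covering clusters
 * 100-149 has e_cpos >= trunc_start, so the whole record is removed and
 * trunc_end drops to 100; a preceding record covering 60-99 is then
 * trimmed from cluster 80 onwards (trunc_cpos = 80, trunc_len = 20) and
 * trunc_end drops to 80 == trunc_start, ending the loop in
 * ocfs2_remove_inode_range().
 */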
1666
1667static int ocfs2_remove_inode_range(struct inode *inode,
1668				    struct buffer_head *di_bh, u64 byte_start,
1669				    u64 byte_len)
1670{
1671	int ret = 0, flags = 0, done = 0, i;
1672	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1673	u32 cluster_in_el;
1674	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1675	struct ocfs2_cached_dealloc_ctxt dealloc;
1676	struct address_space *mapping = inode->i_mapping;
1677	struct ocfs2_extent_tree et;
1678	struct ocfs2_path *path = NULL;
1679	struct ocfs2_extent_list *el = NULL;
1680	struct ocfs2_extent_rec *rec = NULL;
1681	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1682	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1683
1684	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1685	ocfs2_init_dealloc_ctxt(&dealloc);
1686
1687	trace_ocfs2_remove_inode_range(
1688			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1689			(unsigned long long)byte_start,
1690			(unsigned long long)byte_len);
1691
1692	if (byte_len == 0)
1693		return 0;
1694
1695	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1696		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1697					    byte_start + byte_len, 0);
1698		if (ret) {
1699			mlog_errno(ret);
1700			goto out;
1701		}
1702		/*
1703		 * There's no need to get fancy with the page cache
1704		 * truncate of an inline-data inode. We're talking
1705		 * about less than a page here, which will be cached
1706		 * in the dinode buffer anyway.
1707		 */
1708		unmap_mapping_range(mapping, 0, 0, 0);
1709		truncate_inode_pages(mapping, 0);
1710		goto out;
1711	}
1712
1713	/*
 1714	 * For reflinks, we may need to CoW two clusters which might be
 1715	 * partially zeroed later, if the hole's start and end offsets fall
 1716	 * within one cluster (i.e. are not exactly aligned to the cluster size).
1717	 */
1718
1719	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
1720
1721		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1722		if (ret) {
1723			mlog_errno(ret);
1724			goto out;
1725		}
1726
1727		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1728		if (ret) {
1729			mlog_errno(ret);
1730			goto out;
1731		}
1732	}
1733
1734	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1735	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1736	cluster_in_el = trunc_end;
1737
1738	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1739	if (ret) {
1740		mlog_errno(ret);
1741		goto out;
1742	}
1743
1744	path = ocfs2_new_path_from_et(&et);
1745	if (!path) {
1746		ret = -ENOMEM;
1747		mlog_errno(ret);
1748		goto out;
1749	}
1750
1751	while (trunc_end > trunc_start) {
1752
1753		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1754				      cluster_in_el);
1755		if (ret) {
1756			mlog_errno(ret);
1757			goto out;
1758		}
1759
1760		el = path_leaf_el(path);
1761
1762		i = ocfs2_find_rec(el, trunc_end);
1763		/*
1764		 * Need to go to previous extent block.
1765		 */
1766		if (i < 0) {
1767			if (path->p_tree_depth == 0)
1768				break;
1769
1770			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1771							    path,
1772							    &cluster_in_el);
1773			if (ret) {
1774				mlog_errno(ret);
1775				goto out;
1776			}
1777
1778			/*
1779			 * We've reached the leftmost extent block,
1780			 * it's safe to leave.
1781			 */
1782			if (cluster_in_el == 0)
1783				break;
1784
1785			/*
1786			 * The 'pos' searched for previous extent block is
1787			 * always one cluster less than actual trunc_end.
1788			 */
1789			trunc_end = cluster_in_el + 1;
1790
1791			ocfs2_reinit_path(path, 1);
1792
1793			continue;
1794
1795		} else
1796			rec = &el->l_recs[i];
1797
1798		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1799				     &trunc_len, &trunc_end, &blkno, &done);
1800		if (done)
1801			break;
1802
1803		flags = rec->e_flags;
1804		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1805
1806		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1807					       phys_cpos, trunc_len, flags,
1808					       &dealloc, refcount_loc);
1809		if (ret < 0) {
1810			mlog_errno(ret);
1811			goto out;
1812		}
1813
1814		cluster_in_el = trunc_end;
1815
1816		ocfs2_reinit_path(path, 1);
1817	}
1818
1819	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1820
1821out:
1822	ocfs2_schedule_truncate_log_flush(osb, 1);
1823	ocfs2_run_deallocs(osb, &dealloc);
1824
1825	return ret;
1826}
1827
1828/*
1829 * Parts of this function taken from xfs_change_file_space()
1830 */
1831static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1832				     loff_t f_pos, unsigned int cmd,
1833				     struct ocfs2_space_resv *sr,
1834				     int change_size)
1835{
1836	int ret;
1837	s64 llen;
1838	loff_t size;
1839	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1840	struct buffer_head *di_bh = NULL;
1841	handle_t *handle;
1842	unsigned long long max_off = inode->i_sb->s_maxbytes;
1843
1844	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1845		return -EROFS;
1846
1847	mutex_lock(&inode->i_mutex);
1848
1849	/*
1850	 * This prevents concurrent writes on other nodes
1851	 */
1852	ret = ocfs2_rw_lock(inode, 1);
1853	if (ret) {
1854		mlog_errno(ret);
1855		goto out;
1856	}
1857
1858	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1859	if (ret) {
1860		mlog_errno(ret);
1861		goto out_rw_unlock;
1862	}
1863
1864	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1865		ret = -EPERM;
1866		goto out_inode_unlock;
1867	}
1868
1869	switch (sr->l_whence) {
1870	case 0: /*SEEK_SET*/
1871		break;
1872	case 1: /*SEEK_CUR*/
1873		sr->l_start += f_pos;
1874		break;
1875	case 2: /*SEEK_END*/
1876		sr->l_start += i_size_read(inode);
1877		break;
1878	default:
1879		ret = -EINVAL;
1880		goto out_inode_unlock;
1881	}
1882	sr->l_whence = 0;
1883
1884	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1885
1886	if (sr->l_start < 0
1887	    || sr->l_start > max_off
1888	    || (sr->l_start + llen) < 0
1889	    || (sr->l_start + llen) > max_off) {
1890		ret = -EINVAL;
1891		goto out_inode_unlock;
1892	}
1893	size = sr->l_start + sr->l_len;
1894
1895	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1896		if (sr->l_len <= 0) {
1897			ret = -EINVAL;
1898			goto out_inode_unlock;
1899		}
1900	}
1901
1902	if (file && should_remove_suid(file->f_path.dentry)) {
1903		ret = __ocfs2_write_remove_suid(inode, di_bh);
1904		if (ret) {
1905			mlog_errno(ret);
1906			goto out_inode_unlock;
1907		}
1908	}
1909
1910	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1911	switch (cmd) {
1912	case OCFS2_IOC_RESVSP:
1913	case OCFS2_IOC_RESVSP64:
1914		/*
1915		 * This takes unsigned offsets, but the signed ones we
1916		 * pass have been checked against overflow above.
1917		 */
1918		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1919						       sr->l_len);
1920		break;
1921	case OCFS2_IOC_UNRESVSP:
1922	case OCFS2_IOC_UNRESVSP64:
1923		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1924					       sr->l_len);
1925		break;
1926	default:
1927		ret = -EINVAL;
1928	}
1929	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1930	if (ret) {
1931		mlog_errno(ret);
1932		goto out_inode_unlock;
1933	}
1934
1935	/*
1936	 * We update c/mtime for these changes
1937	 */
1938	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1939	if (IS_ERR(handle)) {
1940		ret = PTR_ERR(handle);
1941		mlog_errno(ret);
1942		goto out_inode_unlock;
1943	}
1944
1945	if (change_size && i_size_read(inode) < size)
1946		i_size_write(inode, size);
1947
1948	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1949	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1950	if (ret < 0)
1951		mlog_errno(ret);
1952
1953	ocfs2_commit_trans(osb, handle);
1954
1955out_inode_unlock:
1956	brelse(di_bh);
1957	ocfs2_inode_unlock(inode, 1);
1958out_rw_unlock:
1959	ocfs2_rw_unlock(inode, 1);
1960
1961out:
1962	mutex_unlock(&inode->i_mutex);
1963	return ret;
1964}
1965
1966int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1967			    struct ocfs2_space_resv *sr)
1968{
1969	struct inode *inode = file->f_path.dentry->d_inode;
1970	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1971
1972	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1973	    !ocfs2_writes_unwritten_extents(osb))
1974		return -ENOTTY;
1975	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1976		 !ocfs2_sparse_alloc(osb))
1977		return -ENOTTY;
1978
1979	if (!S_ISREG(inode->i_mode))
1980		return -EINVAL;
1981
1982	if (!(file->f_mode & FMODE_WRITE))
1983		return -EBADF;
1984
1985	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1986}
1987
1988static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
1989			    loff_t len)
1990{
1991	struct inode *inode = file->f_path.dentry->d_inode;
1992	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1993	struct ocfs2_space_resv sr;
1994	int change_size = 1;
1995	int cmd = OCFS2_IOC_RESVSP64;
1996
1997	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1998		return -EOPNOTSUPP;
1999	if (!ocfs2_writes_unwritten_extents(osb))
2000		return -EOPNOTSUPP;
2001
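	/* Map the fallocate request onto the existing space reservation ioctls. */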
2002	if (mode & FALLOC_FL_KEEP_SIZE)
2003		change_size = 0;
2004
2005	if (mode & FALLOC_FL_PUNCH_HOLE)
2006		cmd = OCFS2_IOC_UNRESVSP64;
2007
2008	sr.l_whence = 0;
2009	sr.l_start = (s64)offset;
2010	sr.l_len = (s64)len;
2011
2012	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2013					 change_size);
2014}
2015
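/*
 * Returns 1 if any cluster backing the byte range [pos, pos + count) is
 * marked refcounted (shared), 0 if none are, or a negative errno.
 */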
2016int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2017				   size_t count)
2018{
2019	int ret = 0;
2020	unsigned int extent_flags;
2021	u32 cpos, clusters, extent_len, phys_cpos;
2022	struct super_block *sb = inode->i_sb;
2023
2024	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2025	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
2026	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2027		return 0;
2028
2029	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2030	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2031
2032	while (clusters) {
2033		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2034					 &extent_flags);
2035		if (ret < 0) {
2036			mlog_errno(ret);
2037			goto out;
2038		}
2039
2040		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2041			ret = 1;
2042			break;
2043		}
2044
2045		if (extent_len > clusters)
2046			extent_len = clusters;
2047
2048		clusters -= extent_len;
2049		cpos += extent_len;
2050	}
2051out:
2052	return ret;
2053}
2054
2055static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2056					    struct file *file,
2057					    loff_t pos, size_t count,
2058					    int *meta_level)
2059{
2060	int ret;
2061	struct buffer_head *di_bh = NULL;
2062	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2063	u32 clusters =
2064		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2065
2066	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2067	if (ret) {
2068		mlog_errno(ret);
2069		goto out;
2070	}
2071
2072	*meta_level = 1;
2073
2074	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
2075	if (ret)
2076		mlog_errno(ret);
2077out:
2078	brelse(di_bh);
2079	return ret;
2080}
2081
2082static int ocfs2_prepare_inode_for_write(struct file *file,
2083					 loff_t *ppos,
2084					 size_t count,
2085					 int appending,
2086					 int *direct_io,
2087					 int *has_refcount)
2088{
2089	int ret = 0, meta_level = 0;
2090	struct dentry *dentry = file->f_path.dentry;
2091	struct inode *inode = dentry->d_inode;
2092	loff_t saved_pos = 0, end;
2093
2094	/*
2095	 * We start with a read level meta lock and only jump to an ex
2096	 * if we need to make modifications here.
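	 * meta_level: 0 = PR (read), 1 = EX (write), -1 = lock not held.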
2097	 */
2098	for(;;) {
2099		ret = ocfs2_inode_lock(inode, NULL, meta_level);
2100		if (ret < 0) {
2101			meta_level = -1;
2102			mlog_errno(ret);
2103			goto out;
2104		}
2105
2106		/* Clear suid / sgid if necessary. We do this here
2107		 * instead of later in the write path because
2108		 * remove_suid() calls ->setattr without any hint that
2109		 * we may have already done our cluster locking. Since
2110		 * ocfs2_setattr() *must* take cluster locks to
2111		 * proceed, this will lead us to recursively lock the
2112		 * inode. There's also the dinode i_size state which
2113		 * can be lost via setattr during extending writes (we
2114		 * set inode->i_size at the end of a write). */
2115		if (should_remove_suid(dentry)) {
2116			if (meta_level == 0) {
2117				ocfs2_inode_unlock(inode, meta_level);
2118				meta_level = 1;
2119				continue;
2120			}
2121
2122			ret = ocfs2_write_remove_suid(inode);
2123			if (ret < 0) {
2124				mlog_errno(ret);
2125				goto out_unlock;
2126			}
2127		}
2128
2129		/* work on a copy of ppos until we're sure that we won't have
2130		 * to recalculate it due to relocking. */
2131		if (appending)
2132			saved_pos = i_size_read(inode);
2133		else
2134			saved_pos = *ppos;
2135
2136		end = saved_pos + count;
2137
2138		ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
2139		if (ret == 1) {
2140			ocfs2_inode_unlock(inode, meta_level);
2141			meta_level = -1;
2142
2143			ret = ocfs2_prepare_inode_for_refcount(inode,
2144							       file,
2145							       saved_pos,
2146							       count,
2147							       &meta_level);
2148			if (has_refcount)
2149				*has_refcount = 1;
2150			if (direct_io)
2151				*direct_io = 0;
2152		}
2153
2154		if (ret < 0) {
2155			mlog_errno(ret);
2156			goto out_unlock;
2157		}
2158
2159		/*
2160		 * Skip the O_DIRECT checks if we don't need
2161		 * them.
2162		 */
2163		if (!direct_io || !(*direct_io))
2164			break;
2165
2166		/*
2167		 * There's no sane way to do direct writes to an inode
2168		 * with inline data.
2169		 */
2170		if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2171			*direct_io = 0;
2172			break;
2173		}
2174
2175		/*
2176		 * Allowing concurrent direct writes means
2177		 * i_size changes wouldn't be synchronized, so
2178		 * one node could wind up truncating another
2179		 * node's writes.
2180		 */
2181		if (end > i_size_read(inode)) {
2182			*direct_io = 0;
2183			break;
2184		}
2185
2186		/*
2187		 * We don't fill holes during direct io, so
2188		 * check for them here. If any are found, the
2189		 * caller will have to retake some cluster
2190		 * locks and initiate the io as buffered.
2191		 */
2192		ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2193		if (ret == 1) {
2194			*direct_io = 0;
2195			ret = 0;
2196		} else if (ret < 0)
2197			mlog_errno(ret);
2198		break;
2199	}
2200
2201	if (appending)
2202		*ppos = saved_pos;
2203
2204out_unlock:
2205	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2206					    saved_pos, appending, count,
2207					    direct_io, has_refcount);
2208
2209	if (meta_level >= 0)
2210		ocfs2_inode_unlock(inode, meta_level);
2211
2212out:
2213	return ret;
2214}
2215
2216static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2217				    const struct iovec *iov,
2218				    unsigned long nr_segs,
2219				    loff_t pos)
2220{
2221	int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
2222	int can_do_direct, has_refcount = 0;
2223	ssize_t written = 0;
2224	size_t ocount;		/* original count */
2225	size_t count;		/* after file limit checks */
2226	loff_t old_size, *ppos = &iocb->ki_pos;
2227	u32 old_clusters;
2228	struct file *file = iocb->ki_filp;
2229	struct inode *inode = file->f_path.dentry->d_inode;
2230	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2231	int full_coherency = !(osb->s_mount_opt &
2232			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2233
2234	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2235		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2236		file->f_path.dentry->d_name.len,
2237		file->f_path.dentry->d_name.name,
2238		(unsigned int)nr_segs);
2239
2240	if (iocb->ki_left == 0)
2241		return 0;
2242
2243	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2244
2245	appending = file->f_flags & O_APPEND ? 1 : 0;
2246	direct_io = file->f_flags & O_DIRECT ? 1 : 0;
2247
2248	mutex_lock(&inode->i_mutex);
2249
2250	ocfs2_iocb_clear_sem_locked(iocb);
2251
2252relock:
2253	/* to match setattr's i_mutex -> rw_lock ordering */
2254	if (direct_io) {
2255		have_alloc_sem = 1;
2256		/* communicate with ocfs2_dio_end_io */
2257		ocfs2_iocb_set_sem_locked(iocb);
2258	}
2259
2260	/*
2261	 * Concurrent O_DIRECT writes are allowed with
2262	 * mount option "coherency=buffered".
2263	 */
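	/* rw_level 1 asks for the EX cluster rw lock, 0 for a shared one. */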
2264	rw_level = (!direct_io || full_coherency);
2265
2266	ret = ocfs2_rw_lock(inode, rw_level);
2267	if (ret < 0) {
2268		mlog_errno(ret);
2269		goto out_sems;
2270	}
2271
2272	/*
2273	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2274	 * inode_lock to guarantee coherency.
2275	 */
2276	if (direct_io && full_coherency) {
2277		/*
2278		 * We need to take and drop the inode lock to force
2279		 * other nodes to drop their caches.  Buffered I/O
2280		 * already does this in write_begin().
2281		 */
2282		ret = ocfs2_inode_lock(inode, NULL, 1);
2283		if (ret < 0) {
2284			mlog_errno(ret);
2285			goto out_sems;
2286		}
2287
2288		ocfs2_inode_unlock(inode, 1);
2289	}
2290
2291	can_do_direct = direct_io;
2292	ret = ocfs2_prepare_inode_for_write(file, ppos,
2293					    iocb->ki_left, appending,
2294					    &can_do_direct, &has_refcount);
2295	if (ret < 0) {
2296		mlog_errno(ret);
2297		goto out;
2298	}
2299
2300	/*
2301	 * We can't complete the direct I/O as requested, fall back to
2302	 * buffered I/O.
2303	 */
2304	if (direct_io && !can_do_direct) {
2305		ocfs2_rw_unlock(inode, rw_level);
2306
2307		have_alloc_sem = 0;
2308		rw_level = -1;
2309
2310		direct_io = 0;
2311		goto relock;
2312	}
2313
2314	/*
2315	 * To later detect whether a journal commit for sync writes is
2316	 * necessary, we sample i_size and cluster count here.
2317	 */
2318	old_size = i_size_read(inode);
2319	old_clusters = OCFS2_I(inode)->ip_clusters;
2320
2321	/* communicate with ocfs2_dio_end_io */
2322	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2323
2324	ret = generic_segment_checks(iov, &nr_segs, &ocount,
2325				     VERIFY_READ);
2326	if (ret)
2327		goto out_dio;
2328
2329	count = ocount;
2330	ret = generic_write_checks(file, ppos, &count,
2331				   S_ISBLK(inode->i_mode));
2332	if (ret)
2333		goto out_dio;
2334
2335	if (direct_io) {
2336		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2337						    ppos, count, ocount);
2338		if (written < 0) {
2339			ret = written;
2340			goto out_dio;
2341		}
2342	} else {
2343		current->backing_dev_info = file->f_mapping->backing_dev_info;
2344		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
2345						      ppos, count, 0);
2346		current->backing_dev_info = NULL;
2347	}
2348
2349out_dio:
2350	/* buffered aio wouldn't have proper lock coverage today */
2351	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2352
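	/*
	 * Flush the data ourselves for O_DSYNC/O_SYNC writes and for O_DIRECT
	 * writes that fell back to buffered I/O, and force a journal commit
	 * if the inode metadata changed underneath us.
	 */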
2353	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2354	    ((file->f_flags & O_DIRECT) && !direct_io)) {
2355		ret = filemap_fdatawrite_range(file->f_mapping, pos,
2356					       pos + count - 1);
2357		if (ret < 0)
2358			written = ret;
2359
2360		if (!ret && ((old_size != i_size_read(inode)) ||
2361			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
2362			     has_refcount)) {
2363			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2364			if (ret < 0)
2365				written = ret;
2366		}
2367
2368		if (!ret)
2369			ret = filemap_fdatawait_range(file->f_mapping, pos,
2370						      pos + count - 1);
2371	}
2372
2373	/*
2374	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2375	 * function pointer which is called when o_direct io completes so that
2376	 * it can unlock our rw lock.
2377	 * Unfortunately there are error cases which call end_io and others
2378	 * that don't, so we don't have to unlock the rw_lock if either an
2379	 * async dio is going to do it in the future or an end_io after an
2380	 * error has already done it.
2381	 */
2382	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2383		rw_level = -1;
2384		have_alloc_sem = 0;
2385	}
2386
2387out:
2388	if (rw_level != -1)
2389		ocfs2_rw_unlock(inode, rw_level);
2390
2391out_sems:
2392	if (have_alloc_sem)
2393		ocfs2_iocb_clear_sem_locked(iocb);
2394
2395	mutex_unlock(&inode->i_mutex);
2396
2397	if (written)
2398		ret = written;
2399	return ret;
2400}
2401
2402static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2403				struct file *out,
2404				struct splice_desc *sd)
2405{
2406	int ret;
2407
2408	ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
2409					    sd->total_len, 0, NULL, NULL);
2410	if (ret < 0) {
2411		mlog_errno(ret);
2412		return ret;
2413	}
2414
2415	return splice_from_pipe_feed(pipe, sd, pipe_to_file);
2416}
2417
2418static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2419				       struct file *out,
2420				       loff_t *ppos,
2421				       size_t len,
2422				       unsigned int flags)
2423{
2424	int ret;
2425	struct address_space *mapping = out->f_mapping;
2426	struct inode *inode = mapping->host;
2427	struct splice_desc sd = {
2428		.total_len = len,
2429		.flags = flags,
2430		.pos = *ppos,
2431		.u.file = out,
2432	};
2433
2434
2435	trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
2436			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2437			out->f_path.dentry->d_name.len,
2438			out->f_path.dentry->d_name.name, len);
2439
2440	if (pipe->inode)
2441		mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
2442
2443	splice_from_pipe_begin(&sd);
2444	do {
2445		ret = splice_from_pipe_next(pipe, &sd);
2446		if (ret <= 0)
2447			break;
2448
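		/* Take i_mutex and the cluster rw lock for each chunk we splice. */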
2449		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2450		ret = ocfs2_rw_lock(inode, 1);
2451		if (ret < 0)
2452			mlog_errno(ret);
2453		else {
2454			ret = ocfs2_splice_to_file(pipe, out, &sd);
2455			ocfs2_rw_unlock(inode, 1);
2456		}
2457		mutex_unlock(&inode->i_mutex);
2458	} while (ret > 0);
2459	splice_from_pipe_end(pipe, &sd);
2460
2461	if (pipe->inode)
2462		mutex_unlock(&pipe->inode->i_mutex);
2463
2464	if (sd.num_spliced)
2465		ret = sd.num_spliced;
2466
2467	if (ret > 0) {
2468		unsigned long nr_pages;
2469		int err;
2470
2471		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2472
2473		err = generic_write_sync(out, *ppos, ret);
2474		if (err)
2475			ret = err;
2476		else
2477			*ppos += ret;
2478
2479		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2480	}
2481
2482	return ret;
2483}
2484
2485static ssize_t ocfs2_file_splice_read(struct file *in,
2486				      loff_t *ppos,
2487				      struct pipe_inode_info *pipe,
2488				      size_t len,
2489				      unsigned int flags)
2490{
2491	int ret = 0, lock_level = 0;
2492	struct inode *inode = in->f_path.dentry->d_inode;
2493
2494	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2495			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2496			in->f_path.dentry->d_name.len,
2497			in->f_path.dentry->d_name.name, len);
2498
2499	/*
2500	 * See the comment in ocfs2_file_aio_read()
2501	 */
2502	ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
2503	if (ret < 0) {
2504		mlog_errno(ret);
2505		goto bail;
2506	}
2507	ocfs2_inode_unlock(inode, lock_level);
2508
2509	ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2510
2511bail:
2512	return ret;
2513}
2514
2515static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2516				   const struct iovec *iov,
2517				   unsigned long nr_segs,
2518				   loff_t pos)
2519{
2520	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2521	struct file *filp = iocb->ki_filp;
2522	struct inode *inode = filp->f_path.dentry->d_inode;
2523
2524	trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2525			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2526			filp->f_path.dentry->d_name.len,
2527			filp->f_path.dentry->d_name.name, nr_segs);
2528
2529
2530	if (!inode) {
2531		ret = -EINVAL;
2532		mlog_errno(ret);
2533		goto bail;
2534	}
2535
2536	ocfs2_iocb_clear_sem_locked(iocb);
2537
2538	/*
2539	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
2540	 * need locks to protect pending reads from racing with truncate.
2541	 */
2542	if (filp->f_flags & O_DIRECT) {
2543		have_alloc_sem = 1;
2544		ocfs2_iocb_set_sem_locked(iocb);
2545
2546		ret = ocfs2_rw_lock(inode, 0);
2547		if (ret < 0) {
2548			mlog_errno(ret);
2549			goto bail;
2550		}
2551		rw_level = 0;
2552		/* communicate with ocfs2_dio_end_io */
2553		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2554	}
2555
2556	/*
2557	 * We're fine letting folks race truncates and extending
2558	 * writes with read across the cluster, just like they can
2559	 * locally. Hence no rw_lock during read.
2560	 *
2561	 * Take and drop the meta data lock to update inode fields
2562	 * like i_size. This gives the checks down in
2563	 * generic_file_aio_read() a chance of actually working.
2564	 */
2565	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2566	if (ret < 0) {
2567		mlog_errno(ret);
2568		goto bail;
2569	}
2570	ocfs2_inode_unlock(inode, lock_level);
2571
2572	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2573	trace_generic_file_aio_read_ret(ret);
2574
2575	/* buffered aio wouldn't have proper lock coverage today */
2576	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2577
2578	/* see ocfs2_file_aio_write */
2579	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2580		rw_level = -1;
2581		have_alloc_sem = 0;
2582	}
2583
2584bail:
2585	if (have_alloc_sem)
2586		ocfs2_iocb_clear_sem_locked(iocb);
2587
2588	if (rw_level != -1)
2589		ocfs2_rw_unlock(inode, rw_level);
2590
2591	return ret;
2592}
2593
2594const struct inode_operations ocfs2_file_iops = {
2595	.setattr	= ocfs2_setattr,
2596	.getattr	= ocfs2_getattr,
2597	.permission	= ocfs2_permission,
2598	.setxattr	= generic_setxattr,
2599	.getxattr	= generic_getxattr,
2600	.listxattr	= ocfs2_listxattr,
2601	.removexattr	= generic_removexattr,
2602	.fiemap		= ocfs2_fiemap,
2603	.get_acl	= ocfs2_iop_get_acl,
2604};
2605
2606const struct inode_operations ocfs2_special_file_iops = {
2607	.setattr	= ocfs2_setattr,
2608	.getattr	= ocfs2_getattr,
2609	.permission	= ocfs2_permission,
2610	.get_acl	= ocfs2_iop_get_acl,
2611};
2612
2613/*
2614 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2615 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2616 */
2617const struct file_operations ocfs2_fops = {
2618	.llseek		= generic_file_llseek,
2619	.read		= do_sync_read,
2620	.write		= do_sync_write,
2621	.mmap		= ocfs2_mmap,
2622	.fsync		= ocfs2_sync_file,
2623	.release	= ocfs2_file_release,
2624	.open		= ocfs2_file_open,
2625	.aio_read	= ocfs2_file_aio_read,
2626	.aio_write	= ocfs2_file_aio_write,
2627	.unlocked_ioctl	= ocfs2_ioctl,
2628#ifdef CONFIG_COMPAT
2629	.compat_ioctl   = ocfs2_compat_ioctl,
2630#endif
2631	.lock		= ocfs2_lock,
2632	.flock		= ocfs2_flock,
2633	.splice_read	= ocfs2_file_splice_read,
2634	.splice_write	= ocfs2_file_splice_write,
2635	.fallocate	= ocfs2_fallocate,
2636};
2637
2638const struct file_operations ocfs2_dops = {
2639	.llseek		= generic_file_llseek,
2640	.read		= generic_read_dir,
2641	.readdir	= ocfs2_readdir,
2642	.fsync		= ocfs2_sync_file,
2643	.release	= ocfs2_dir_release,
2644	.open		= ocfs2_dir_open,
2645	.unlocked_ioctl	= ocfs2_ioctl,
2646#ifdef CONFIG_COMPAT
2647	.compat_ioctl   = ocfs2_compat_ioctl,
2648#endif
2649	.lock		= ocfs2_lock,
2650	.flock		= ocfs2_flock,
2651};
2652
2653/*
2654 * POSIX-lockless variants of our file_operations.
2655 *
2656 * These will be used if the underlying cluster stack does not support
2657 * posix file locking, if the user passes the "localflocks" mount
2658 * option, or if we have a local-only fs.
2659 *
2660 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2661 * so we still want it in the case of no stack support for
2662 * plocks. Internally, it will do the right thing when asked to ignore
2663 * the cluster.
2664 */
2665const struct file_operations ocfs2_fops_no_plocks = {
2666	.llseek		= generic_file_llseek,
2667	.read		= do_sync_read,
2668	.write		= do_sync_write,
2669	.mmap		= ocfs2_mmap,
2670	.fsync		= ocfs2_sync_file,
2671	.release	= ocfs2_file_release,
2672	.open		= ocfs2_file_open,
2673	.aio_read	= ocfs2_file_aio_read,
2674	.aio_write	= ocfs2_file_aio_write,
2675	.unlocked_ioctl	= ocfs2_ioctl,
2676#ifdef CONFIG_COMPAT
2677	.compat_ioctl   = ocfs2_compat_ioctl,
2678#endif
2679	.flock		= ocfs2_flock,
2680	.splice_read	= ocfs2_file_splice_read,
2681	.splice_write	= ocfs2_file_splice_write,
2682	.fallocate	= ocfs2_fallocate,
2683};
2684
2685const struct file_operations ocfs2_dops_no_plocks = {
2686	.llseek		= generic_file_llseek,
2687	.read		= generic_read_dir,
2688	.readdir	= ocfs2_readdir,
2689	.fsync		= ocfs2_sync_file,
2690	.release	= ocfs2_dir_release,
2691	.open		= ocfs2_dir_open,
2692	.unlocked_ioctl	= ocfs2_ioctl,
2693#ifdef CONFIG_COMPAT
2694	.compat_ioctl   = ocfs2_compat_ioctl,
2695#endif
2696	.flock		= ocfs2_flock,
2697};
v4.17
   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * file.c
   5 *
   6 * File open, close, extend, truncate
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public
  12 * License as published by the Free Software Foundation; either
  13 * version 2 of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public
  21 * License along with this program; if not, write to the
  22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23 * Boston, MA 021110-1307, USA.
  24 */
  25
  26#include <linux/capability.h>
  27#include <linux/fs.h>
  28#include <linux/types.h>
  29#include <linux/slab.h>
  30#include <linux/highmem.h>
  31#include <linux/pagemap.h>
  32#include <linux/uio.h>
  33#include <linux/sched.h>
  34#include <linux/splice.h>
  35#include <linux/mount.h>
  36#include <linux/writeback.h>
  37#include <linux/falloc.h>
  38#include <linux/quotaops.h>
  39#include <linux/blkdev.h>
  40#include <linux/backing-dev.h>
  41
  42#include <cluster/masklog.h>
  43
  44#include "ocfs2.h"
  45
  46#include "alloc.h"
  47#include "aops.h"
  48#include "dir.h"
  49#include "dlmglue.h"
  50#include "extent_map.h"
  51#include "file.h"
  52#include "sysfile.h"
  53#include "inode.h"
  54#include "ioctl.h"
  55#include "journal.h"
  56#include "locks.h"
  57#include "mmap.h"
  58#include "suballoc.h"
  59#include "super.h"
  60#include "xattr.h"
  61#include "acl.h"
  62#include "quota.h"
  63#include "refcounttree.h"
  64#include "ocfs2_trace.h"
  65
  66#include "buffer_head_io.h"
  67
  68static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  69{
  70	struct ocfs2_file_private *fp;
  71
  72	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  73	if (!fp)
  74		return -ENOMEM;
  75
  76	fp->fp_file = file;
  77	mutex_init(&fp->fp_mutex);
  78	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  79	file->private_data = fp;
  80
  81	return 0;
  82}
  83
  84static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  85{
  86	struct ocfs2_file_private *fp = file->private_data;
  87	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  88
  89	if (fp) {
  90		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  91		ocfs2_lock_res_free(&fp->fp_flock);
  92		kfree(fp);
  93		file->private_data = NULL;
  94	}
  95}
  96
  97static int ocfs2_file_open(struct inode *inode, struct file *file)
  98{
  99	int status;
 100	int mode = file->f_flags;
 101	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 102
 103	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
 104			      (unsigned long long)oi->ip_blkno,
 105			      file->f_path.dentry->d_name.len,
 106			      file->f_path.dentry->d_name.name, mode);
 107
 108	if (file->f_mode & FMODE_WRITE) {
 109		status = dquot_initialize(inode);
 110		if (status)
 111			goto leave;
 112	}
 113
 114	spin_lock(&oi->ip_lock);
 115
 116	/* Check that the inode hasn't been wiped from disk by another
 117	 * node. If it hasn't then we're safe as long as we hold the
 118	 * spin lock until our increment of open count. */
 119	if (oi->ip_flags & OCFS2_INODE_DELETED) {
 120		spin_unlock(&oi->ip_lock);
 121
 122		status = -ENOENT;
 123		goto leave;
 124	}
 125
 126	if (mode & O_DIRECT)
 127		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 128
 129	oi->ip_open_count++;
 130	spin_unlock(&oi->ip_lock);
 131
 132	status = ocfs2_init_file_private(inode, file);
 133	if (status) {
 134		/*
 135		 * We want to set open count back if we're failing the
 136		 * open.
 137		 */
 138		spin_lock(&oi->ip_lock);
 139		oi->ip_open_count--;
 140		spin_unlock(&oi->ip_lock);
 141	}
 142
 143	file->f_mode |= FMODE_NOWAIT;
 144
 145leave:
 146	return status;
 147}
 148
 149static int ocfs2_file_release(struct inode *inode, struct file *file)
 150{
 151	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 152
 153	spin_lock(&oi->ip_lock);
 154	if (!--oi->ip_open_count)
 155		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 156
 157	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 158				 oi->ip_blkno,
 159				 file->f_path.dentry->d_name.len,
 160				 file->f_path.dentry->d_name.name,
 161				 oi->ip_open_count);
 162	spin_unlock(&oi->ip_lock);
 163
 164	ocfs2_free_file_private(inode, file);
 165
 166	return 0;
 167}
 168
 169static int ocfs2_dir_open(struct inode *inode, struct file *file)
 170{
 171	return ocfs2_init_file_private(inode, file);
 172}
 173
 174static int ocfs2_dir_release(struct inode *inode, struct file *file)
 175{
 176	ocfs2_free_file_private(inode, file);
 177	return 0;
 178}
 179
 180static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 181			   int datasync)
 182{
 183	int err = 0;
 184	struct inode *inode = file->f_mapping->host;
 185	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 186	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 187	journal_t *journal = osb->journal->j_journal;
 188	int ret;
 189	tid_t commit_tid;
 190	bool needs_barrier = false;
 191
 192	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 193			      oi->ip_blkno,
 194			      file->f_path.dentry->d_name.len,
 195			      file->f_path.dentry->d_name.name,
 196			      (unsigned long long)datasync);
 197
 198	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 199		return -EROFS;
 200
 201	err = file_write_and_wait_range(file, start, end);
 202	if (err)
 203		return err;
 204
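	/*
	 * Wait for the jbd2 transaction covering this inode to commit, and
	 * issue a block-device flush ourselves only if that commit will not
	 * already send a barrier.
	 */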
 205	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
 206	if (journal->j_flags & JBD2_BARRIER &&
 207	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 208		needs_barrier = true;
 209	err = jbd2_complete_transaction(journal, commit_tid);
 210	if (needs_barrier) {
 211		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 212		if (!err)
 213			err = ret;
 214	}
 215
 216	if (err)
 217		mlog_errno(err);
 218
 219	return (err < 0) ? -EIO : 0;
 220}
 221
 222int ocfs2_should_update_atime(struct inode *inode,
 223			      struct vfsmount *vfsmnt)
 224{
 225	struct timespec now;
 226	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 227
 228	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 229		return 0;
 230
 231	if ((inode->i_flags & S_NOATIME) ||
 232	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
 233		return 0;
 234
 235	/*
 236	 * We can be called with no vfsmnt structure - NFSD will
 237	 * sometimes do this.
 238	 *
 239	 * Note that our action here is different than touch_atime() -
 240	 * if we can't tell whether this is a noatime mount, then we
 241	 * don't know whether to trust the value of s_atime_quantum.
 242	 */
 243	if (vfsmnt == NULL)
 244		return 0;
 245
 246	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 247	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 248		return 0;
 249
 250	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 251		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
 252		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
 253			return 1;
 254
 255		return 0;
 256	}
 257
 258	now = current_time(inode);
 259	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
 260		return 0;
 261	else
 262		return 1;
 263}
 264
 265int ocfs2_update_inode_atime(struct inode *inode,
 266			     struct buffer_head *bh)
 267{
 268	int ret;
 269	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 270	handle_t *handle;
 271	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 272
 273	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 274	if (IS_ERR(handle)) {
 275		ret = PTR_ERR(handle);
 276		mlog_errno(ret);
 277		goto out;
 278	}
 279
 280	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 281				      OCFS2_JOURNAL_ACCESS_WRITE);
 282	if (ret) {
 283		mlog_errno(ret);
 284		goto out_commit;
 285	}
 286
 287	/*
 288	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 289	 * have i_mutex to guard against concurrent changes to other
 290	 * inode fields.
 291	 */
 292	inode->i_atime = current_time(inode);
 293	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
 294	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 295	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 296	ocfs2_journal_dirty(handle, bh);
 297
 298out_commit:
 299	ocfs2_commit_trans(osb, handle);
 300out:
 301	return ret;
 302}
 303
 304int ocfs2_set_inode_size(handle_t *handle,
 305				struct inode *inode,
 306				struct buffer_head *fe_bh,
 307				u64 new_i_size)
 308{
 309	int status;
 310
 311	i_size_write(inode, new_i_size);
 312	inode->i_blocks = ocfs2_inode_sector_count(inode);
 313	inode->i_ctime = inode->i_mtime = current_time(inode);
 314
 315	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 316	if (status < 0) {
 317		mlog_errno(status);
 318		goto bail;
 319	}
 320
 321bail:
 322	return status;
 323}
 324
 325int ocfs2_simple_size_update(struct inode *inode,
 326			     struct buffer_head *di_bh,
 327			     u64 new_i_size)
 328{
 329	int ret;
 330	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 331	handle_t *handle = NULL;
 332
 333	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 334	if (IS_ERR(handle)) {
 335		ret = PTR_ERR(handle);
 336		mlog_errno(ret);
 337		goto out;
 338	}
 339
 340	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 341				   new_i_size);
 342	if (ret < 0)
 343		mlog_errno(ret);
 344
 345	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 346	ocfs2_commit_trans(osb, handle);
 347out:
 348	return ret;
 349}
 350
 351static int ocfs2_cow_file_pos(struct inode *inode,
 352			      struct buffer_head *fe_bh,
 353			      u64 offset)
 354{
 355	int status;
 356	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 357	unsigned int num_clusters = 0;
 358	unsigned int ext_flags = 0;
 359
 360	/*
 361	 * If the new offset is aligned to the range of the cluster, there is
 362	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 363	 * CoW either.
 364	 */
 365	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 366		return 0;
 367
 368	status = ocfs2_get_clusters(inode, cpos, &phys,
 369				    &num_clusters, &ext_flags);
 370	if (status) {
 371		mlog_errno(status);
 372		goto out;
 373	}
 374
 375	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 376		goto out;
 377
 378	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 379
 380out:
 381	return status;
 382}
 383
 384static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 385				     struct inode *inode,
 386				     struct buffer_head *fe_bh,
 387				     u64 new_i_size)
 388{
 389	int status;
 390	handle_t *handle;
 391	struct ocfs2_dinode *di;
 392	u64 cluster_bytes;
 393
 394	/*
 395	 * We need to CoW the cluster that contains the offset if it is reflinked
 396	 * since we will call ocfs2_zero_range_for_truncate later which will
 397	 * write "0" from offset to the end of the cluster.
 398	 */
 399	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 400	if (status) {
 401		mlog_errno(status);
 402		return status;
 403	}
 404
 405	/* TODO: This needs to actually orphan the inode in this
 406	 * transaction. */
 407
 408	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 409	if (IS_ERR(handle)) {
 410		status = PTR_ERR(handle);
 411		mlog_errno(status);
 412		goto out;
 413	}
 414
 415	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 416					 OCFS2_JOURNAL_ACCESS_WRITE);
 417	if (status < 0) {
 418		mlog_errno(status);
 419		goto out_commit;
 420	}
 421
 422	/*
 423	 * Do this before setting i_size.
 424	 */
 425	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 426	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 427					       cluster_bytes);
 428	if (status) {
 429		mlog_errno(status);
 430		goto out_commit;
 431	}
 432
 433	i_size_write(inode, new_i_size);
 434	inode->i_ctime = inode->i_mtime = current_time(inode);
 435
 436	di = (struct ocfs2_dinode *) fe_bh->b_data;
 437	di->i_size = cpu_to_le64(new_i_size);
 438	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
 439	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 440	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 441
 442	ocfs2_journal_dirty(handle, fe_bh);
 443
 444out_commit:
 445	ocfs2_commit_trans(osb, handle);
 446out:
 447	return status;
 448}
 449
 450int ocfs2_truncate_file(struct inode *inode,
 451			       struct buffer_head *di_bh,
 452			       u64 new_i_size)
 453{
 454	int status = 0;
 455	struct ocfs2_dinode *fe = NULL;
 456	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 457
 458	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 459	 * already validated it */
 460	fe = (struct ocfs2_dinode *) di_bh->b_data;
 461
 462	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 463				  (unsigned long long)le64_to_cpu(fe->i_size),
 464				  (unsigned long long)new_i_size);
 465
 466	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 467			"Inode %llu, inode i_size = %lld != di "
 468			"i_size = %llu, i_flags = 0x%x\n",
 469			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 470			i_size_read(inode),
 471			(unsigned long long)le64_to_cpu(fe->i_size),
 472			le32_to_cpu(fe->i_flags));
 473
 474	if (new_i_size > le64_to_cpu(fe->i_size)) {
 475		trace_ocfs2_truncate_file_error(
 476			(unsigned long long)le64_to_cpu(fe->i_size),
 477			(unsigned long long)new_i_size);
 478		status = -EINVAL;
 479		mlog_errno(status);
 480		goto bail;
 481	}
 482
 483	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 484
 485	ocfs2_resv_discard(&osb->osb_la_resmap,
 486			   &OCFS2_I(inode)->ip_la_data_resv);
 487
 488	/*
 489	 * The inode lock forced other nodes to sync and drop their
 490	 * pages, which (correctly) happens even if we have a truncate
 491	 * without allocation change - ocfs2 cluster sizes can be much
 492	 * greater than page size, so we have to truncate them
 493	 * anyway.
 494	 */
 495	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 496	truncate_inode_pages(inode->i_mapping, new_i_size);
 497
 498	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 499		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 500					       i_size_read(inode), 1);
 501		if (status)
 502			mlog_errno(status);
 503
 504		goto bail_unlock_sem;
 505	}
 506
 507	/* alright, we're going to need to do a full blown alloc size
 508	 * change. Orphan the inode so that recovery can complete the
 509	 * truncate if necessary. This does the task of marking
 510	 * i_size. */
 511	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 512	if (status < 0) {
 513		mlog_errno(status);
 514		goto bail_unlock_sem;
 515	}
 516
 517	status = ocfs2_commit_truncate(osb, inode, di_bh);
 518	if (status < 0) {
 519		mlog_errno(status);
 520		goto bail_unlock_sem;
 521	}
 522
 523	/* TODO: orphan dir cleanup here. */
 524bail_unlock_sem:
 525	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 526
 527bail:
 528	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 529		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 530
 531	return status;
 532}
 533
 534/*
 535 * extend file allocation only here.
 536 * we'll update all the disk stuff, and oip->alloc_size
 537 *
 538 * expect stuff to be locked, a transaction started and enough data /
 539 * metadata reservations in the contexts.
 540 *
 541 * Will return -EAGAIN, and a reason if a restart is needed.
 542 * If passed in, *reason will always be set, even in error.
 543 */
 544int ocfs2_add_inode_data(struct ocfs2_super *osb,
 545			 struct inode *inode,
 546			 u32 *logical_offset,
 547			 u32 clusters_to_add,
 548			 int mark_unwritten,
 549			 struct buffer_head *fe_bh,
 550			 handle_t *handle,
 551			 struct ocfs2_alloc_context *data_ac,
 552			 struct ocfs2_alloc_context *meta_ac,
 553			 enum ocfs2_alloc_restarted *reason_ret)
 554{
 555	int ret;
 556	struct ocfs2_extent_tree et;
 557
 558	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 559	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 560					  clusters_to_add, mark_unwritten,
 561					  data_ac, meta_ac, reason_ret);
 562
 563	return ret;
 564}
 565
 566static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 567				     u32 clusters_to_add, int mark_unwritten)
 568{
 569	int status = 0;
 570	int restart_func = 0;
 571	int credits;
 572	u32 prev_clusters;
 573	struct buffer_head *bh = NULL;
 574	struct ocfs2_dinode *fe = NULL;
 575	handle_t *handle = NULL;
 576	struct ocfs2_alloc_context *data_ac = NULL;
 577	struct ocfs2_alloc_context *meta_ac = NULL;
 578	enum ocfs2_alloc_restarted why = RESTART_NONE;
 579	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 580	struct ocfs2_extent_tree et;
 581	int did_quota = 0;
 582
 583	/*
 584	 * Unwritten extent only exists for file systems which
 585	 * support holes.
 586	 */
 587	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 588
 589	status = ocfs2_read_inode_block(inode, &bh);
 590	if (status < 0) {
 591		mlog_errno(status);
 592		goto leave;
 593	}
 594	fe = (struct ocfs2_dinode *) bh->b_data;
 595
 596restart_all:
 597	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 598
 599	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 600	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 601				       &data_ac, &meta_ac);
 602	if (status) {
 603		mlog_errno(status);
 604		goto leave;
 605	}
 606
 607	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 608	handle = ocfs2_start_trans(osb, credits);
 609	if (IS_ERR(handle)) {
 610		status = PTR_ERR(handle);
 611		handle = NULL;
 612		mlog_errno(status);
 613		goto leave;
 614	}
 615
 616restarted_transaction:
 617	trace_ocfs2_extend_allocation(
 618		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 619		(unsigned long long)i_size_read(inode),
 620		le32_to_cpu(fe->i_clusters), clusters_to_add,
 621		why, restart_func);
 622
 623	status = dquot_alloc_space_nodirty(inode,
 624			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 625	if (status)
 626		goto leave;
 627	did_quota = 1;
 628
 629	/* reserve a write to the file entry early on - that way if we
 630	 * run out of credits in the allocation path, we can still
 631	 * update i_size. */
 632	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 633					 OCFS2_JOURNAL_ACCESS_WRITE);
 634	if (status < 0) {
 635		mlog_errno(status);
 636		goto leave;
 637	}
 638
 639	prev_clusters = OCFS2_I(inode)->ip_clusters;
 640
 641	status = ocfs2_add_inode_data(osb,
 642				      inode,
 643				      &logical_start,
 644				      clusters_to_add,
 645				      mark_unwritten,
 646				      bh,
 647				      handle,
 648				      data_ac,
 649				      meta_ac,
 650				      &why);
 651	if ((status < 0) && (status != -EAGAIN)) {
 652		if (status != -ENOSPC)
 653			mlog_errno(status);
 654		goto leave;
 655	}
 656	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 657	ocfs2_journal_dirty(handle, bh);
 658
 659	spin_lock(&OCFS2_I(inode)->ip_lock);
 660	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 661	spin_unlock(&OCFS2_I(inode)->ip_lock);
 662	/* Release unused quota reservation */
 663	dquot_free_space(inode,
 664			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 665	did_quota = 0;
 666
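	/*
	 * The allocator may ask for a restart: RESTART_META restarts from the
	 * top with fresh reservations, RESTART_TRANS extends the current
	 * transaction and retries the allocation.
	 */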
 667	if (why != RESTART_NONE && clusters_to_add) {
 668		if (why == RESTART_META) {
 669			restart_func = 1;
 670			status = 0;
 671		} else {
 672			BUG_ON(why != RESTART_TRANS);
 673
 674			status = ocfs2_allocate_extend_trans(handle, 1);
 675			if (status < 0) {
 676				/* handle still has to be committed at
 677				 * this point. */
 678				status = -ENOMEM;
 679				mlog_errno(status);
 680				goto leave;
 681			}
 682			goto restarted_transaction;
 683		}
 684	}
 685
 686	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 687	     le32_to_cpu(fe->i_clusters),
 688	     (unsigned long long)le64_to_cpu(fe->i_size),
 689	     OCFS2_I(inode)->ip_clusters,
 690	     (unsigned long long)i_size_read(inode));
 691
 692leave:
 693	if (status < 0 && did_quota)
 694		dquot_free_space(inode,
 695			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 696	if (handle) {
 697		ocfs2_commit_trans(osb, handle);
 698		handle = NULL;
 699	}
 700	if (data_ac) {
 701		ocfs2_free_alloc_context(data_ac);
 702		data_ac = NULL;
 703	}
 704	if (meta_ac) {
 705		ocfs2_free_alloc_context(meta_ac);
 706		meta_ac = NULL;
 707	}
 708	if ((!status) && restart_func) {
 709		restart_func = 0;
 710		goto restart_all;
 711	}
 712	brelse(bh);
 713	bh = NULL;
 714
 715	return status;
 716}
 717
 718/*
 719 * While a write will already be ordering the data, a truncate will not.
 720 * Thus, we need to explicitly order the zeroed pages.
 721 */
 722static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
 723						struct buffer_head *di_bh)
 724{
 725	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 726	handle_t *handle = NULL;
 727	int ret = 0;
 728
 729	if (!ocfs2_should_order_data(inode))
 730		goto out;
 731
 732	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 733	if (IS_ERR(handle)) {
 734		ret = -ENOMEM;
 735		mlog_errno(ret);
 736		goto out;
 737	}
 738
 739	ret = ocfs2_jbd2_file_inode(handle, inode);
 740	if (ret < 0) {
 741		mlog_errno(ret);
 742		goto out;
 743	}
 744
 745	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 746				      OCFS2_JOURNAL_ACCESS_WRITE);
 747	if (ret)
 748		mlog_errno(ret);
 749	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 750
 751out:
 752	if (ret) {
 753		if (!IS_ERR(handle))
 754			ocfs2_commit_trans(osb, handle);
 755		handle = ERR_PTR(ret);
 756	}
 757	return handle;
 758}
 759
 760/* Some parts of this were taken from generic_cont_expand, which turned out
 761 * to be too fragile to do exactly what we need without us having to
 762 * worry about recursive locking in ->write_begin() and ->write_end(). */
 763static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 764				 u64 abs_to, struct buffer_head *di_bh)
 765{
 766	struct address_space *mapping = inode->i_mapping;
 767	struct page *page;
 768	unsigned long index = abs_from >> PAGE_SHIFT;
 769	handle_t *handle;
 770	int ret = 0;
 771	unsigned zero_from, zero_to, block_start, block_end;
 772	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 773
 774	BUG_ON(abs_from >= abs_to);
 775	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 776	BUG_ON(abs_from & (inode->i_blkbits - 1));
 777
 778	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
 779	if (IS_ERR(handle)) {
 780		ret = PTR_ERR(handle);
 781		goto out;
 782	}
 783
 784	page = find_or_create_page(mapping, index, GFP_NOFS);
 785	if (!page) {
 786		ret = -ENOMEM;
 787		mlog_errno(ret);
 788		goto out_commit_trans;
 789	}
 790
 791	/* Get the offsets within the page that we want to zero */
 792	zero_from = abs_from & (PAGE_SIZE - 1);
 793	zero_to = abs_to & (PAGE_SIZE - 1);
 794	if (!zero_to)
 795		zero_to = PAGE_SIZE;
 796
 797	trace_ocfs2_write_zero_page(
 798			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 799			(unsigned long long)abs_from,
 800			(unsigned long long)abs_to,
 801			index, zero_from, zero_to);
 802
 803	/* We know that zero_from is block aligned */
 804	for (block_start = zero_from; block_start < zero_to;
 805	     block_start = block_end) {
 806		block_end = block_start + i_blocksize(inode);
 807
 808		/*
 809		 * block_start is block-aligned.  Bump it by one to force
 810		 * __block_write_begin and block_commit_write to zero the
 811		 * whole block.
 812		 */
 813		ret = __block_write_begin(page, block_start + 1, 0,
 814					  ocfs2_get_block);
 815		if (ret < 0) {
 816			mlog_errno(ret);
 817			goto out_unlock;
 818		}
 819
 820
 821		/* must not update i_size! */
 822		ret = block_commit_write(page, block_start + 1,
 823					 block_start + 1);
 824		if (ret < 0)
 825			mlog_errno(ret);
 826		else
 827			ret = 0;
 828	}
 829
 830	/*
 831	 * fs-writeback will release dirty pages without the page lock
 832	 * if their offset is beyond the inode size; the release happens in
 833	 * block_write_full_page().
 834	 */
 835	i_size_write(inode, abs_to);
 836	inode->i_blocks = ocfs2_inode_sector_count(inode);
 837	di->i_size = cpu_to_le64((u64)i_size_read(inode));
 838	inode->i_mtime = inode->i_ctime = current_time(inode);
 839	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
 840	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 841	di->i_mtime_nsec = di->i_ctime_nsec;
 842	if (handle) {
 843		ocfs2_journal_dirty(handle, di_bh);
 844		ocfs2_update_inode_fsync_trans(handle, inode, 1);
 845	}
 846
 847out_unlock:
 848	unlock_page(page);
 849	put_page(page);
 850out_commit_trans:
 851	if (handle)
 852		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 853out:
 854	return ret;
 855}
 856
 857/*
 858 * Find the next range to zero.  We do this in terms of bytes because
 859 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 860 * pagecache.  We may return multiple extents.
 861 *
 862	 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 863 * needs to be zeroed.  range_start and range_end return the next zeroing
 864 * range.  A subsequent call should pass the previous range_end as its
 865 * zero_start.  If range_end is 0, there's nothing to do.
 866 *
 867 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 868 */
 869static int ocfs2_zero_extend_get_range(struct inode *inode,
 870				       struct buffer_head *di_bh,
 871				       u64 zero_start, u64 zero_end,
 872				       u64 *range_start, u64 *range_end)
 873{
 874	int rc = 0, needs_cow = 0;
 875	u32 p_cpos, zero_clusters = 0;
 876	u32 zero_cpos =
 877		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 878	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 879	unsigned int num_clusters = 0;
 880	unsigned int ext_flags = 0;
 881
 882	while (zero_cpos < last_cpos) {
 883		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 884					&num_clusters, &ext_flags);
 885		if (rc) {
 886			mlog_errno(rc);
 887			goto out;
 888		}
 889
 890		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 891			zero_clusters = num_clusters;
 892			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 893				needs_cow = 1;
 894			break;
 895		}
 896
 897		zero_cpos += num_clusters;
 898	}
 899	if (!zero_clusters) {
 900		*range_end = 0;
 901		goto out;
 902	}
 903
 904	while ((zero_cpos + zero_clusters) < last_cpos) {
 905		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 906					&p_cpos, &num_clusters,
 907					&ext_flags);
 908		if (rc) {
 909			mlog_errno(rc);
 910			goto out;
 911		}
 912
 913		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 914			break;
 915		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 916			needs_cow = 1;
 917		zero_clusters += num_clusters;
 918	}
 919	if ((zero_cpos + zero_clusters) > last_cpos)
 920		zero_clusters = last_cpos - zero_cpos;
 921
 922	if (needs_cow) {
 923		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 924					zero_clusters, UINT_MAX);
 925		if (rc) {
 926			mlog_errno(rc);
 927			goto out;
 928		}
 929	}
 930
 931	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 932	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 933					     zero_cpos + zero_clusters);
 934
 935out:
 936	return rc;
 937}
 938
 939/*
 940 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 941 * has made sure that the entire range needs zeroing.
 942 */
 943static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 944				   u64 range_end, struct buffer_head *di_bh)
 945{
 946	int rc = 0;
 947	u64 next_pos;
 948	u64 zero_pos = range_start;
 949
 950	trace_ocfs2_zero_extend_range(
 951			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 952			(unsigned long long)range_start,
 953			(unsigned long long)range_end);
 954	BUG_ON(range_start >= range_end);
 955
 956	while (zero_pos < range_end) {
 957		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 958		if (next_pos > range_end)
 959			next_pos = range_end;
 960		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 961		if (rc < 0) {
 962			mlog_errno(rc);
 963			break;
 964		}
 965		zero_pos = next_pos;
 966
 967		/*
 968		 * Very large extends have the potential to lock up
 969		 * the cpu for extended periods of time.
 970		 */
 971		cond_resched();
 972	}
 973
 974	return rc;
 975}
 976
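/*
 * Zero the allocated, written space between the current i_size and
 * zero_to_size, one range from ocfs2_zero_extend_get_range() at a time.
 */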
 977int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 978		      loff_t zero_to_size)
 979{
 980	int ret = 0;
 981	u64 zero_start, range_start = 0, range_end = 0;
 982	struct super_block *sb = inode->i_sb;
 983
 984	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 985	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 986				(unsigned long long)zero_start,
 987				(unsigned long long)i_size_read(inode));
 988	while (zero_start < zero_to_size) {
 989		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 990						  zero_to_size,
 991						  &range_start,
 992						  &range_end);
 993		if (ret) {
 994			mlog_errno(ret);
 995			break;
 996		}
 997		if (!range_end)
 998			break;
 999		/* Trim the ends */
1000		if (range_start < zero_start)
1001			range_start = zero_start;
1002		if (range_end > zero_to_size)
1003			range_end = zero_to_size;
1004
1005		ret = ocfs2_zero_extend_range(inode, range_start,
1006					      range_end, di_bh);
1007		if (ret) {
1008			mlog_errno(ret);
1009			break;
1010		}
1011		zero_start = range_end;
1012	}
1013
1014	return ret;
1015}
1016
1017int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1018			  u64 new_i_size, u64 zero_to)
1019{
1020	int ret;
1021	u32 clusters_to_add;
1022	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1023
1024	/*
1025	 * Only quota files call this without a bh, and they can't be
1026	 * refcounted.
1027	 */
1028	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1029	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1030
1031	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1032	if (clusters_to_add < oi->ip_clusters)
1033		clusters_to_add = 0;
1034	else
1035		clusters_to_add -= oi->ip_clusters;
1036
1037	if (clusters_to_add) {
1038		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
1039						clusters_to_add, 0);
1040		if (ret) {
1041			mlog_errno(ret);
1042			goto out;
1043		}
1044	}
1045
1046	/*
1047	 * Call this even if we don't add any clusters to the tree. We
1048	 * still need to zero the area between the old i_size and the
1049	 * new i_size.
1050	 */
1051	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1052	if (ret < 0)
1053		mlog_errno(ret);
1054
1055out:
1056	return ret;
1057}
1058
1059static int ocfs2_extend_file(struct inode *inode,
1060			     struct buffer_head *di_bh,
1061			     u64 new_i_size)
1062{
1063	int ret = 0;
1064	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1065
1066	BUG_ON(!di_bh);
1067
1068	/* setattr sometimes calls us like this. */
1069	if (new_i_size == 0)
1070		goto out;
1071
1072	if (i_size_read(inode) == new_i_size)
1073		goto out;
1074	BUG_ON(new_i_size < i_size_read(inode));
1075
1076	/*
1077	 * The alloc sem blocks people in read/write from reading our
1078	 * allocation until we're done changing it. We depend on
1079	 * i_mutex to block other extend/truncate calls while we're
1080	 * here.  We even have to hold it for sparse files because there
1081	 * might be some tail zeroing.
1082	 */
1083	down_write(&oi->ip_alloc_sem);
1084
1085	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1086		/*
1087		 * We can optimize small extends by keeping the inode's
1088		 * inline data.
1089		 */
1090		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1091			up_write(&oi->ip_alloc_sem);
1092			goto out_update_size;
1093		}
1094
1095		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1096		if (ret) {
1097			up_write(&oi->ip_alloc_sem);
1098			mlog_errno(ret);
1099			goto out;
1100		}
1101	}
1102
1103	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1104		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1105	else
1106		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1107					    new_i_size);
1108
1109	up_write(&oi->ip_alloc_sem);
1110
1111	if (ret < 0) {
1112		mlog_errno(ret);
1113		goto out;
1114	}
1115
1116out_update_size:
1117	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1118	if (ret < 0)
1119		mlog_errno(ret);
1120
1121out:
1122	return ret;
1123}
1124
1125int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1126{
1127	int status = 0, size_change;
1128	int inode_locked = 0;
1129	struct inode *inode = d_inode(dentry);
1130	struct super_block *sb = inode->i_sb;
1131	struct ocfs2_super *osb = OCFS2_SB(sb);
1132	struct buffer_head *bh = NULL;
1133	handle_t *handle = NULL;
1134	struct dquot *transfer_to[MAXQUOTAS] = { };
1135	int qtype;
1136	int had_lock;
1137	struct ocfs2_lock_holder oh;
1138
1139	trace_ocfs2_setattr(inode, dentry,
1140			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1141			    dentry->d_name.len, dentry->d_name.name,
1142			    attr->ia_valid, attr->ia_mode,
1143			    from_kuid(&init_user_ns, attr->ia_uid),
1144			    from_kgid(&init_user_ns, attr->ia_gid));
1145
1146	/* ensuring we don't even attempt to truncate a symlink */
1147	if (S_ISLNK(inode->i_mode))
1148		attr->ia_valid &= ~ATTR_SIZE;
1149
1150#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1151			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1152	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1153		return 0;
1154
1155	status = setattr_prepare(dentry, attr);
1156	if (status)
1157		return status;
1158
1159	if (is_quota_modification(inode, attr)) {
1160		status = dquot_initialize(inode);
1161		if (status)
1162			return status;
1163	}
1164	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1165	if (size_change) {
1166		/*
1167		 * Here we should wait dio to finish before inode lock
1168		 * to avoid a deadlock between ocfs2_setattr() and
1169		 * ocfs2_dio_end_io_write()
1170		 */
1171		inode_dio_wait(inode);
1172
1173		status = ocfs2_rw_lock(inode, 1);
1174		if (status < 0) {
1175			mlog_errno(status);
1176			goto bail;
1177		}
1178	}
1179
1180	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1181	if (had_lock < 0) {
1182		status = had_lock;
1183		goto bail_unlock_rw;
1184	} else if (had_lock) {
1185		/*
1186		 * As far as we know, ocfs2_setattr() could only be the first
1187		 * VFS entry point in the call chain of recursive cluster
1188		 * locking issue.
1189		 *
1190		 * For instance:
1191		 * chmod_common()
1192		 *  notify_change()
1193		 *   ocfs2_setattr()
1194		 *    posix_acl_chmod()
1195		 *     ocfs2_iop_get_acl()
1196		 *
1197		 * But, we're not 100% sure if it's always true, because the
1198		 * ordering of the VFS entry points in the call chain is out
1199		 * of our control. So, we'd better dump the stack here to
1200		 * catch the other cases of recursive locking.
1201		 */
1202		mlog(ML_ERROR, "Another case of recursive locking:\n");
1203		dump_stack();
1204	}
1205	inode_locked = 1;
1206
1207	if (size_change) {
1208		status = inode_newsize_ok(inode, attr->ia_size);
1209		if (status)
1210			goto bail_unlock;
1211
1212		if (i_size_read(inode) >= attr->ia_size) {
1213			if (ocfs2_should_order_data(inode)) {
1214				status = ocfs2_begin_ordered_truncate(inode,
1215								      attr->ia_size);
1216				if (status)
1217					goto bail_unlock;
1218			}
1219			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1220		} else
1221			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1222		if (status < 0) {
1223			if (status != -ENOSPC)
1224				mlog_errno(status);
1225			status = -ENOSPC;
1226			goto bail_unlock;
1227		}
1228	}
1229
1230	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1231	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1232		/*
1233		 * Gather pointers to quota structures so that allocation /
1234		 * freeing of quota structures happens here and not inside
1235		 * dquot_transfer() where we have problems with lock ordering
1236		 */
1237		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1238		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1239		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1240			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1241			if (IS_ERR(transfer_to[USRQUOTA])) {
1242				status = PTR_ERR(transfer_to[USRQUOTA]);
 
1243				goto bail_unlock;
1244			}
1245		}
1246		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1247		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1248		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1249			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1250			if (IS_ERR(transfer_to[GRPQUOTA])) {
1251				status = PTR_ERR(transfer_to[GRPQUOTA]);
 
1252				goto bail_unlock;
1253			}
1254		}
1255		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1256					   2 * ocfs2_quota_trans_credits(sb));
1257		if (IS_ERR(handle)) {
1258			status = PTR_ERR(handle);
1259			mlog_errno(status);
1260			goto bail_unlock;
1261		}
1262		status = __dquot_transfer(inode, transfer_to);
1263		if (status < 0)
1264			goto bail_commit;
1265	} else {
1266		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1267		if (IS_ERR(handle)) {
1268			status = PTR_ERR(handle);
1269			mlog_errno(status);
1270			goto bail_unlock;
1271		}
1272	}
1273
1274	setattr_copy(inode, attr);
1275	mark_inode_dirty(inode);
1276
1277	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1278	if (status < 0)
1279		mlog_errno(status);
1280
1281bail_commit:
1282	ocfs2_commit_trans(osb, handle);
1283bail_unlock:
1284	if (status && inode_locked) {
1285		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1286		inode_locked = 0;
1287	}
1288bail_unlock_rw:
1289	if (size_change)
1290		ocfs2_rw_unlock(inode, 1);
1291bail:
 
1292
1293	/* Release quota pointers in case we acquired them */
1294	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1295		dqput(transfer_to[qtype]);
1296
1297	if (!status && attr->ia_valid & ATTR_MODE) {
1298		status = ocfs2_acl_chmod(inode, bh);
1299		if (status < 0)
1300			mlog_errno(status);
1301	}
1302	if (inode_locked)
1303		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1304
1305	brelse(bh);
1306	return status;
1307}
1308
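/*
 * Revalidate the inode against the cluster and fill in *stat.  blksize is
 * reported as the cluster size, and inline-data inodes have their block
 * count fudged so they do not look completely sparse.
 */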
1309int ocfs2_getattr(const struct path *path, struct kstat *stat,
1310		  u32 request_mask, unsigned int flags)
 
1311{
1312	struct inode *inode = d_inode(path->dentry);
1313	struct super_block *sb = path->dentry->d_sb;
1314	struct ocfs2_super *osb = sb->s_fs_info;
1315	int err;
1316
1317	err = ocfs2_inode_revalidate(path->dentry);
1318	if (err) {
1319		if (err != -ENOENT)
1320			mlog_errno(err);
1321		goto bail;
1322	}
1323
1324	generic_fillattr(inode, stat);
1325	/*
1326	 * If there is inline data in the inode, the inode will normally not
1327	 * have data blocks allocated (it may have an external xattr block).
1328	 * Report at least one sector for such files, so tools like tar, rsync,
1329	 * and others don't incorrectly think the file is completely sparse.
1330	 */
1331	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1332		stat->blocks += (stat->size + 511)>>9;
1333
1334	/* We set the blksize from the cluster size for performance */
1335	stat->blksize = osb->s_clustersize;
1336
1337bail:
1338	return err;
1339}
1340
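/*
 * Check permissions under a read-level cluster inode lock.  RCU-walk
 * (MAY_NOT_BLOCK) lookups are bounced back to ref-walk with -ECHILD,
 * since taking the cluster lock may block.
 */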
1341int ocfs2_permission(struct inode *inode, int mask)
1342{
1343	int ret, had_lock;
1344	struct ocfs2_lock_holder oh;
1345
1346	if (mask & MAY_NOT_BLOCK)
1347		return -ECHILD;
1348
1349	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1350	if (had_lock < 0) {
1351		ret = had_lock;
1352		goto out;
1353	} else if (had_lock) {
1354		/* See comments in ocfs2_setattr() for details.
1355		 * The call chain of this case could be:
1356		 * do_sys_open()
1357		 *  may_open()
1358		 *   inode_permission()
1359		 *    ocfs2_permission()
1360		 *     ocfs2_iop_get_acl()
1361		 */
1362		mlog(ML_ERROR, "Another case of recursive locking:\n");
1363		dump_stack();
1364	}
1365
1366	ret = generic_permission(inode, mask);
1367
1368	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1369out:
1370	return ret;
1371}
1372
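/*
 * Strip the setuid (and, where applicable, setgid) bit from the inode and
 * write the new mode into the on-disk dinode within its own transaction.
 */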
1373static int __ocfs2_write_remove_suid(struct inode *inode,
1374				     struct buffer_head *bh)
1375{
1376	int ret;
1377	handle_t *handle;
1378	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1379	struct ocfs2_dinode *di;
1380
1381	trace_ocfs2_write_remove_suid(
1382			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1383			inode->i_mode);
1384
1385	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1386	if (IS_ERR(handle)) {
1387		ret = PTR_ERR(handle);
1388		mlog_errno(ret);
1389		goto out;
1390	}
1391
1392	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1393				      OCFS2_JOURNAL_ACCESS_WRITE);
1394	if (ret < 0) {
1395		mlog_errno(ret);
1396		goto out_trans;
1397	}
1398
1399	inode->i_mode &= ~S_ISUID;
1400	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1401		inode->i_mode &= ~S_ISGID;
1402
1403	di = (struct ocfs2_dinode *) bh->b_data;
1404	di->i_mode = cpu_to_le16(inode->i_mode);
1405	ocfs2_update_inode_fsync_trans(handle, inode, 0);
1406
1407	ocfs2_journal_dirty(handle, bh);
1408
1409out_trans:
1410	ocfs2_commit_trans(osb, handle);
1411out:
1412	return ret;
1413}
1414
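/* Read the inode block and strip the suid/sgid bits. */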
1415static int ocfs2_write_remove_suid(struct inode *inode)
1416{
1417	int ret;
1418	struct buffer_head *bh = NULL;
1419
1420	ret = ocfs2_read_inode_block(inode, &bh);
1421	if (ret < 0) {
1422		mlog_errno(ret);
1423		goto out;
1424	}
1425
1426	ret = __ocfs2_write_remove_suid(inode, bh);
1427out:
1428	brelse(bh);
1429	return ret;
1430}
1431
1432/*
1433 * Allocate enough extents to cover the region starting at byte offset
1434 * start for len bytes. Existing extents are skipped; any extents
1435 * added are marked as "unwritten".
1436 */
1437static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1438					    u64 start, u64 len)
1439{
1440	int ret;
1441	u32 cpos, phys_cpos, clusters, alloc_size;
1442	u64 end = start + len;
1443	struct buffer_head *di_bh = NULL;
1444
1445	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1446		ret = ocfs2_read_inode_block(inode, &di_bh);
1447		if (ret) {
1448			mlog_errno(ret);
1449			goto out;
1450		}
1451
1452		/*
1453		 * Nothing to do if the requested reservation range
1454		 * fits within the inode.
1455		 */
1456		if (ocfs2_size_fits_inline_data(di_bh, end))
1457			goto out;
1458
1459		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1460		if (ret) {
1461			mlog_errno(ret);
1462			goto out;
1463		}
1464	}
1465
1466	/*
1467	 * We consider both start and len to be inclusive.
1468	 */
1469	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1470	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1471	clusters -= cpos;
1472
1473	while (clusters) {
1474		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1475					 &alloc_size, NULL);
1476		if (ret) {
1477			mlog_errno(ret);
1478			goto out;
1479		}
1480
1481		/*
1482		 * Hole or existing extent len can be arbitrary, so
1483		 * cap it to our own allocation request.
1484		 */
1485		if (alloc_size > clusters)
1486			alloc_size = clusters;
1487
1488		if (phys_cpos) {
1489			/*
1490			 * We already have an allocation at this
1491			 * region so we can safely skip it.
1492			 */
1493			goto next;
1494		}
1495
1496		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1497		if (ret) {
1498			if (ret != -ENOSPC)
1499				mlog_errno(ret);
1500			goto out;
1501		}
1502
1503next:
1504		cpos += alloc_size;
1505		clusters -= alloc_size;
1506	}
1507
1508	ret = 0;
1509out:
1510
1511	brelse(di_bh);
1512	return ret;
1513}
1514
1515/*
1516 * Truncate a byte range, avoiding pages within partial clusters. This
1517 * preserves those pages for the zeroing code to write to.
1518 */
1519static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1520					 u64 byte_len)
1521{
1522	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1523	loff_t start, end;
1524	struct address_space *mapping = inode->i_mapping;
1525
1526	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1527	end = byte_start + byte_len;
1528	end = end & ~(osb->s_clustersize - 1);
1529
1530	if (start < end) {
1531		unmap_mapping_range(mapping, start, end - start, 0);
1532		truncate_inode_pages_range(mapping, start, end - 1);
1533	}
1534}
1535
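/*
 * Zero the unaligned head and tail of the byte range [start, start + len).
 * Partial clusters stay allocated when a hole is punched, so the punched
 * bytes within them must be explicitly zeroed.  Nothing needs to be done
 * when both edges are cluster-aligned.
 */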
1536static int ocfs2_zero_partial_clusters(struct inode *inode,
1537				       u64 start, u64 len)
1538{
1539	int ret = 0;
1540	u64 tmpend = 0;
1541	u64 end = start + len;
1542	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1543	unsigned int csize = osb->s_clustersize;
1544	handle_t *handle;
1545
1546	/*
1547	 * The "start" and "end" values are NOT necessarily part of
1548	 * the range whose allocation is being deleted. Rather, this
1549	 * is what the user passed in with the request. We must zero
1550	 * partial clusters here. There's no need to worry about
1551	 * physical allocation - the zeroing code knows to skip holes.
1552	 */
1553	trace_ocfs2_zero_partial_clusters(
1554		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1555		(unsigned long long)start, (unsigned long long)end);
1556
1557	/*
1558	 * If both edges are on a cluster boundary then there's no
1559	 * zeroing required as the region is part of the allocation to
1560	 * be truncated.
1561	 */
1562	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1563		goto out;
1564
1565	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1566	if (IS_ERR(handle)) {
1567		ret = PTR_ERR(handle);
1568		mlog_errno(ret);
1569		goto out;
1570	}
1571
1572	/*
1573	 * If start is on a cluster boundary and end is somewhere in another
1574	 * cluster, we have not COWed the cluster starting at start, unless
1575	 * end is also within the same cluster. So, in this case, we skip this
1576	 * first call to ocfs2_zero_range_for_truncate() and move on to the
1577	 * next one.
1578	 */
1579	if ((start & (csize - 1)) != 0) {
1580		/*
1581		 * We want to get the byte offset of the end of the 1st
1582		 * cluster.
1583		 */
1584		tmpend = (u64)osb->s_clustersize +
1585			(start & ~(osb->s_clustersize - 1));
1586		if (tmpend > end)
1587			tmpend = end;
1588
1589		trace_ocfs2_zero_partial_clusters_range1(
1590			(unsigned long long)start,
1591			(unsigned long long)tmpend);
1592
1593		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1594						    tmpend);
1595		if (ret)
1596			mlog_errno(ret);
1597	}
1598
1599	if (tmpend < end) {
1600		/*
1601		 * This may make start and end equal, but the zeroing
1602		 * code will skip any work in that case so there's no
1603		 * need to catch it up here.
1604		 */
1605		start = end & ~(osb->s_clustersize - 1);
1606
1607		trace_ocfs2_zero_partial_clusters_range2(
1608			(unsigned long long)start, (unsigned long long)end);
1609
1610		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1611		if (ret)
1612			mlog_errno(ret);
1613	}
1614	ocfs2_update_inode_fsync_trans(handle, inode, 1);
1615
1616	ocfs2_commit_trans(osb, handle);
1617out:
1618	return ret;
1619}
1620
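/*
 * Return the index of the rightmost extent record in @el whose cpos is
 * below @pos, or -1 if every record starts at or beyond @pos.
 */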
1621static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1622{
1623	int i;
1624	struct ocfs2_extent_rec *rec = NULL;
1625
1626	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1627
1628		rec = &el->l_recs[i];
1629
1630		if (le32_to_cpu(rec->e_cpos) < pos)
1631			break;
1632	}
1633
1634	return i;
1635}
1636
1637/*
1638 * Helper to calculate the punching position and length in one run.  We
1639 * handle the following three cases in order:
1640 *
1641 * - remove the entire record
1642 * - remove a partial record
1643 * - no record needs to be removed (hole-punching completed)
1644 */
1645static void ocfs2_calc_trunc_pos(struct inode *inode,
1646				 struct ocfs2_extent_list *el,
1647				 struct ocfs2_extent_rec *rec,
1648				 u32 trunc_start, u32 *trunc_cpos,
1649				 u32 *trunc_len, u32 *trunc_end,
1650				 u64 *blkno, int *done)
1651{
1652	int ret = 0;
1653	u32 coff, range;
1654
1655	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1656
1657	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1658		/*
1659		 * remove an entire extent record.
1660		 */
1661		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1662		/*
1663		 * Skip holes if any.
1664		 */
1665		if (range < *trunc_end)
1666			*trunc_end = range;
1667		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1668		*blkno = le64_to_cpu(rec->e_blkno);
1669		*trunc_end = le32_to_cpu(rec->e_cpos);
1670	} else if (range > trunc_start) {
1671		/*
1672		 * remove a partial extent record, which means we're
1673		 * removing the last extent record.
1674		 */
1675		*trunc_cpos = trunc_start;
1676		/*
1677		 * skip hole if any.
1678		 */
1679		if (range < *trunc_end)
1680			*trunc_end = range;
1681		*trunc_len = *trunc_end - trunc_start;
1682		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1683		*blkno = le64_to_cpu(rec->e_blkno) +
1684				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1685		*trunc_end = trunc_start;
1686	} else {
1687		/*
1688		 * There are two possibilities here:
1689		 *
1690		 * - the last record has been removed
1691		 * - trunc_start was within a hole
1692		 *
1693		 * Either case means that hole punching is complete.
1694		 */
1695		ret = 1;
1696	}
1697
1698	*done = ret;
1699}
1700
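/*
 * Punch a hole of @byte_len bytes at @byte_start.  Inline data is
 * truncated in place; otherwise the partial clusters at the edges are
 * zeroed and the whole clusters in between are removed from the extent
 * tree, walking the leaf extent blocks from right to left.
 */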
1701int ocfs2_remove_inode_range(struct inode *inode,
1702			     struct buffer_head *di_bh, u64 byte_start,
1703			     u64 byte_len)
1704{
1705	int ret = 0, flags = 0, done = 0, i;
1706	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1707	u32 cluster_in_el;
1708	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1709	struct ocfs2_cached_dealloc_ctxt dealloc;
1710	struct address_space *mapping = inode->i_mapping;
1711	struct ocfs2_extent_tree et;
1712	struct ocfs2_path *path = NULL;
1713	struct ocfs2_extent_list *el = NULL;
1714	struct ocfs2_extent_rec *rec = NULL;
1715	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1716	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1717
1718	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1719	ocfs2_init_dealloc_ctxt(&dealloc);
1720
1721	trace_ocfs2_remove_inode_range(
1722			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1723			(unsigned long long)byte_start,
1724			(unsigned long long)byte_len);
1725
1726	if (byte_len == 0)
1727		return 0;
1728
1729	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1730		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1731					    byte_start + byte_len, 0);
1732		if (ret) {
1733			mlog_errno(ret);
1734			goto out;
1735		}
1736		/*
1737		 * There's no need to get fancy with the page cache
1738		 * truncate of an inline-data inode. We're talking
1739		 * about less than a page here, which will be cached
1740		 * in the dinode buffer anyway.
1741		 */
1742		unmap_mapping_range(mapping, 0, 0, 0);
1743		truncate_inode_pages(mapping, 0);
1744		goto out;
1745	}
1746
1747	/*
1748	 * For reflinks, we may need to CoW two clusters which might be
1749	 * partially zeroed later, if the hole's start and end offsets fall
1750	 * within one cluster (i.e. are not aligned to the cluster size).
1751	 */
1752
1753	if (ocfs2_is_refcount_inode(inode)) {
 
1754		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1755		if (ret) {
1756			mlog_errno(ret);
1757			goto out;
1758		}
1759
1760		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1761		if (ret) {
1762			mlog_errno(ret);
1763			goto out;
1764		}
1765	}
1766
1767	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1768	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1769	cluster_in_el = trunc_end;
1770
1771	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1772	if (ret) {
1773		mlog_errno(ret);
1774		goto out;
1775	}
1776
1777	path = ocfs2_new_path_from_et(&et);
1778	if (!path) {
1779		ret = -ENOMEM;
1780		mlog_errno(ret);
1781		goto out;
1782	}
1783
1784	while (trunc_end > trunc_start) {
1785
1786		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1787				      cluster_in_el);
1788		if (ret) {
1789			mlog_errno(ret);
1790			goto out;
1791		}
1792
1793		el = path_leaf_el(path);
1794
1795		i = ocfs2_find_rec(el, trunc_end);
1796		/*
1797		 * Need to go to previous extent block.
1798		 */
1799		if (i < 0) {
1800			if (path->p_tree_depth == 0)
1801				break;
1802
1803			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1804							    path,
1805							    &cluster_in_el);
1806			if (ret) {
1807				mlog_errno(ret);
1808				goto out;
1809			}
1810
1811			/*
1812			 * We've reached the leftmost extent block, so
1813			 * it's safe to leave.
1814			 */
1815			if (cluster_in_el == 0)
1816				break;
1817
1818			/*
1819			 * The 'pos' we searched for the previous extent block
1820			 * is always one cluster less than the actual trunc_end.
1821			 */
1822			trunc_end = cluster_in_el + 1;
1823
1824			ocfs2_reinit_path(path, 1);
1825
1826			continue;
1827
1828		} else
1829			rec = &el->l_recs[i];
1830
1831		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1832				     &trunc_len, &trunc_end, &blkno, &done);
1833		if (done)
1834			break;
1835
1836		flags = rec->e_flags;
1837		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1838
1839		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1840					       phys_cpos, trunc_len, flags,
1841					       &dealloc, refcount_loc, false);
1842		if (ret < 0) {
1843			mlog_errno(ret);
1844			goto out;
1845		}
1846
1847		cluster_in_el = trunc_end;
1848
1849		ocfs2_reinit_path(path, 1);
1850	}
1851
1852	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1853
1854out:
1855	ocfs2_free_path(path);
1856	ocfs2_schedule_truncate_log_flush(osb, 1);
1857	ocfs2_run_deallocs(osb, &dealloc);
1858
1859	return ret;
1860}
1861
1862/*
1863 * Parts of this function taken from xfs_change_file_space()
1864 */
1865static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1866				     loff_t f_pos, unsigned int cmd,
1867				     struct ocfs2_space_resv *sr,
1868				     int change_size)
1869{
1870	int ret;
1871	s64 llen;
1872	loff_t size;
1873	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1874	struct buffer_head *di_bh = NULL;
1875	handle_t *handle;
1876	unsigned long long max_off = inode->i_sb->s_maxbytes;
1877
1878	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1879		return -EROFS;
1880
1881	inode_lock(inode);
1882
1883	/*
1884	 * This prevents concurrent writes on other nodes
1885	 */
1886	ret = ocfs2_rw_lock(inode, 1);
1887	if (ret) {
1888		mlog_errno(ret);
1889		goto out;
1890	}
1891
1892	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1893	if (ret) {
1894		mlog_errno(ret);
1895		goto out_rw_unlock;
1896	}
1897
1898	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1899		ret = -EPERM;
1900		goto out_inode_unlock;
1901	}
1902
1903	switch (sr->l_whence) {
1904	case 0: /*SEEK_SET*/
1905		break;
1906	case 1: /*SEEK_CUR*/
1907		sr->l_start += f_pos;
1908		break;
1909	case 2: /*SEEK_END*/
1910		sr->l_start += i_size_read(inode);
1911		break;
1912	default:
1913		ret = -EINVAL;
1914		goto out_inode_unlock;
1915	}
1916	sr->l_whence = 0;
1917
1918	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1919
1920	if (sr->l_start < 0
1921	    || sr->l_start > max_off
1922	    || (sr->l_start + llen) < 0
1923	    || (sr->l_start + llen) > max_off) {
1924		ret = -EINVAL;
1925		goto out_inode_unlock;
1926	}
1927	size = sr->l_start + sr->l_len;
1928
1929	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
1930	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
1931		if (sr->l_len <= 0) {
1932			ret = -EINVAL;
1933			goto out_inode_unlock;
1934		}
1935	}
1936
1937	if (file && should_remove_suid(file->f_path.dentry)) {
1938		ret = __ocfs2_write_remove_suid(inode, di_bh);
1939		if (ret) {
1940			mlog_errno(ret);
1941			goto out_inode_unlock;
1942		}
1943	}
1944
1945	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1946	switch (cmd) {
1947	case OCFS2_IOC_RESVSP:
1948	case OCFS2_IOC_RESVSP64:
1949		/*
1950		 * This takes unsigned offsets, but the signed ones we
1951		 * pass have been checked against overflow above.
1952		 */
1953		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1954						       sr->l_len);
1955		break;
1956	case OCFS2_IOC_UNRESVSP:
1957	case OCFS2_IOC_UNRESVSP64:
1958		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1959					       sr->l_len);
1960		break;
1961	default:
1962		ret = -EINVAL;
1963	}
1964	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1965	if (ret) {
1966		mlog_errno(ret);
1967		goto out_inode_unlock;
1968	}
1969
1970	/*
1971	 * We update c/mtime for these changes
1972	 */
1973	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1974	if (IS_ERR(handle)) {
1975		ret = PTR_ERR(handle);
1976		mlog_errno(ret);
1977		goto out_inode_unlock;
1978	}
1979
1980	if (change_size && i_size_read(inode) < size)
1981		i_size_write(inode, size);
1982
1983	inode->i_ctime = inode->i_mtime = current_time(inode);
1984	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1985	if (ret < 0)
1986		mlog_errno(ret);
1987
1988	if (file && (file->f_flags & O_SYNC))
1989		handle->h_sync = 1;
1990
1991	ocfs2_commit_trans(osb, handle);
1992
1993out_inode_unlock:
1994	brelse(di_bh);
1995	ocfs2_inode_unlock(inode, 1);
1996out_rw_unlock:
1997	ocfs2_rw_unlock(inode, 1);
1998
1999out:
2000	inode_unlock(inode);
2001	return ret;
2002}
2003
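/*
 * Ioctl entry point for OCFS2_IOC_RESVSP(64) and OCFS2_IOC_UNRESVSP(64).
 * Reject the request if the filesystem lacks unwritten-extent or sparse
 * file support, or if the file is not a regular file opened for writing.
 */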
2004int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2005			    struct ocfs2_space_resv *sr)
2006{
2007	struct inode *inode = file_inode(file);
2008	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2009	int ret;
2010
2011	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2012	    !ocfs2_writes_unwritten_extents(osb))
2013		return -ENOTTY;
2014	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2015		 !ocfs2_sparse_alloc(osb))
2016		return -ENOTTY;
2017
2018	if (!S_ISREG(inode->i_mode))
2019		return -EINVAL;
2020
2021	if (!(file->f_mode & FMODE_WRITE))
2022		return -EBADF;
2023
2024	ret = mnt_want_write_file(file);
2025	if (ret)
2026		return ret;
2027	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2028	mnt_drop_write_file(file);
2029	return ret;
2030}
2031
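/*
 * fallocate() is implemented on top of the space reservation ioctls: a
 * plain allocation becomes OCFS2_IOC_RESVSP64 and FALLOC_FL_PUNCH_HOLE
 * becomes OCFS2_IOC_UNRESVSP64, with FALLOC_FL_KEEP_SIZE controlling
 * whether i_size may grow.  For example, a hole punch from userspace such
 * as
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * ends up here as OCFS2_IOC_UNRESVSP64 with change_size == 0.
 */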
2032static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2033			    loff_t len)
2034{
2035	struct inode *inode = file_inode(file);
2036	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2037	struct ocfs2_space_resv sr;
2038	int change_size = 1;
2039	int cmd = OCFS2_IOC_RESVSP64;
2040
2041	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2042		return -EOPNOTSUPP;
2043	if (!ocfs2_writes_unwritten_extents(osb))
2044		return -EOPNOTSUPP;
2045
2046	if (mode & FALLOC_FL_KEEP_SIZE)
2047		change_size = 0;
2048
2049	if (mode & FALLOC_FL_PUNCH_HOLE)
2050		cmd = OCFS2_IOC_UNRESVSP64;
2051
2052	sr.l_whence = 0;
2053	sr.l_start = (s64)offset;
2054	sr.l_len = (s64)len;
2055
2056	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2057					 change_size);
2058}
2059
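/*
 * Return 1 if any extent in the byte range [pos, pos + count) is
 * refcounted (shared), 0 if none is, or a negative error code.
 */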
2060int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2061				   size_t count)
2062{
2063	int ret = 0;
2064	unsigned int extent_flags;
2065	u32 cpos, clusters, extent_len, phys_cpos;
2066	struct super_block *sb = inode->i_sb;
2067
2068	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2069	    !ocfs2_is_refcount_inode(inode) ||
2070	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2071		return 0;
2072
2073	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2074	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2075
2076	while (clusters) {
2077		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2078					 &extent_flags);
2079		if (ret < 0) {
2080			mlog_errno(ret);
2081			goto out;
2082		}
2083
2084		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2085			ret = 1;
2086			break;
2087		}
2088
2089		if (extent_len > clusters)
2090			extent_len = clusters;
2091
2092		clusters -= extent_len;
2093		cpos += extent_len;
2094	}
2095out:
2096	return ret;
2097}
2098
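/* An io is "unaligned" unless it both starts and ends on a block boundary. */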
2099static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2100{
2101	int blockmask = inode->i_sb->s_blocksize - 1;
2102	loff_t final_size = pos + count;
2103
2104	if ((pos & blockmask) || (final_size & blockmask))
2105		return 1;
2106	return 0;
2107}
2108
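/*
 * CoW any refcounted clusters covering the write range.  The inode is left
 * locked at meta level 1; the caller is responsible for dropping it.
 */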
2109static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2110					    struct file *file,
2111					    loff_t pos, size_t count,
2112					    int *meta_level)
2113{
2114	int ret;
2115	struct buffer_head *di_bh = NULL;
2116	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2117	u32 clusters =
2118		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2119
2120	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2121	if (ret) {
2122		mlog_errno(ret);
2123		goto out;
2124	}
2125
2126	*meta_level = 1;
2127
2128	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2129	if (ret)
2130		mlog_errno(ret);
2131out:
2132	brelse(di_bh);
2133	return ret;
2134}
2135
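/*
 * Take the cluster inode lock ahead of a write, escalating from a
 * read-level to an exclusive meta lock only when the suid/sgid bits have
 * to be cleared, and CoW any refcounted extents in the write range.
 * Non-blocking (!wait) callers get -EAGAIN instead of sleeping, after
 * checking that the io would only overwrite already-allocated blocks.
 */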
2136static int ocfs2_prepare_inode_for_write(struct file *file,
2137					 loff_t pos, size_t count, int wait)
2138{
2139	int ret = 0, meta_level = 0, overwrite_io = 0;
2140	struct dentry *dentry = file->f_path.dentry;
2141	struct inode *inode = d_inode(dentry);
2142	struct buffer_head *di_bh = NULL;
2143	loff_t end;
2144
2145	/*
2146	 * We start with a read level meta lock and only jump to an ex
2147	 * if we need to make modifications here.
2148	 */
2149	for(;;) {
2150		if (wait)
2151			ret = ocfs2_inode_lock(inode, NULL, meta_level);
2152		else
2153			ret = ocfs2_try_inode_lock(inode,
2154				overwrite_io ? NULL : &di_bh, meta_level);
2155		if (ret < 0) {
2156			meta_level = -1;
2157			if (ret != -EAGAIN)
2158				mlog_errno(ret);
2159			goto out;
2160		}
2161
2162		/*
2163		 * Check if IO will overwrite allocated blocks in case
2164		 * IOCB_NOWAIT flag is set.
2165		 */
2166		if (!wait && !overwrite_io) {
2167			overwrite_io = 1;
2168			if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
2169				ret = -EAGAIN;
2170				goto out_unlock;
2171			}
2172
2173			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2174			brelse(di_bh);
2175			di_bh = NULL;
2176			up_read(&OCFS2_I(inode)->ip_alloc_sem);
2177			if (ret < 0) {
2178				if (ret != -EAGAIN)
2179					mlog_errno(ret);
2180				goto out_unlock;
2181			}
2182		}
2183
2184		/* Clear suid / sgid if necessary. We do this here
2185		 * instead of later in the write path because
2186		 * remove_suid() calls ->setattr without any hint that
2187		 * we may have already done our cluster locking. Since
2188		 * ocfs2_setattr() *must* take cluster locks to
2189		 * proceed, this will lead us to recursively lock the
2190		 * inode. There's also the dinode i_size state which
2191		 * can be lost via setattr during extending writes (we
2192		 * set inode->i_size at the end of a write). */
2193		if (should_remove_suid(dentry)) {
2194			if (meta_level == 0) {
2195				ocfs2_inode_unlock(inode, meta_level);
2196				meta_level = 1;
2197				continue;
2198			}
2199
2200			ret = ocfs2_write_remove_suid(inode);
2201			if (ret < 0) {
2202				mlog_errno(ret);
2203				goto out_unlock;
2204			}
2205		}
2206
2207		end = pos + count;
2208
2209		ret = ocfs2_check_range_for_refcount(inode, pos, count);
2210		if (ret == 1) {
2211			ocfs2_inode_unlock(inode, meta_level);
2212			meta_level = -1;
2213
2214			ret = ocfs2_prepare_inode_for_refcount(inode,
2215							       file,
2216							       pos,
2217							       count,
2218							       &meta_level);
2219		}
2220
2221		if (ret < 0) {
2222			mlog_errno(ret);
2223			goto out_unlock;
2224		}
2225
2226		break;
2227	}
2228
2229out_unlock:
2230	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2231					    pos, count, wait);
2232
2233	brelse(di_bh);
2234
2235	if (meta_level >= 0)
2236		ocfs2_inode_unlock(inode, meta_level);
2237
2238out:
2239	return ret;
2240}
2241
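/*
 * Write path.  The rw lock is taken shared only for non-appending O_DIRECT
 * writes under "coherency=buffered"; everything else takes it EX.  After
 * the inode has been prepared, the actual work is done by
 * __generic_file_write_iter(); O_DSYNC buffered writes and IS_SYNC inodes
 * then flush the written range and force a journal commit.
 */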
2242static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2243				    struct iov_iter *from)
2244{
2245	int rw_level;
 
2246	ssize_t written = 0;
2247	ssize_t ret;
2248	size_t count = iov_iter_count(from);
2249	struct file *file = iocb->ki_filp;
2250	struct inode *inode = file_inode(file);
2251	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2252	int full_coherency = !(osb->s_mount_opt &
2253			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2254	void *saved_ki_complete = NULL;
2255	int append_write = ((iocb->ki_pos + count) >=
2256			i_size_read(inode) ? 1 : 0);
2257	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2258	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2259
2260	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2261		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2262		file->f_path.dentry->d_name.len,
2263		file->f_path.dentry->d_name.name,
2264		(unsigned int)from->nr_segs);	/* GRRRRR */
2265
2266	if (!direct_io && nowait)
2267		return -EOPNOTSUPP;
2268
2269	if (count == 0)
2270		return 0;
2271
2272	if (nowait) {
2273		if (!inode_trylock(inode))
2274			return -EAGAIN;
2275	} else
2276		inode_lock(inode);
2277
2278	/*
2279	 * Concurrent O_DIRECT writes are allowed with
2280	 * the mount option "coherency=buffered".
2281	 * For append writes, we must take the rw lock EX.
2282	 */
2283	rw_level = (!direct_io || full_coherency || append_write);
2284
2285	if (nowait)
2286		ret = ocfs2_try_rw_lock(inode, rw_level);
2287	else
2288		ret = ocfs2_rw_lock(inode, rw_level);
2289	if (ret < 0) {
2290		if (ret != -EAGAIN)
2291			mlog_errno(ret);
2292		goto out_mutex;
2293	}
2294
2295	/*
2296	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2297	 * inode_lock to guarantee coherency.
2298	 */
2299	if (direct_io && full_coherency) {
2300		/*
2301		 * We need to take and drop the inode lock to force
2302		 * other nodes to drop their caches.  Buffered I/O
2303		 * already does this in write_begin().
2304		 */
2305		if (nowait)
2306			ret = ocfs2_try_inode_lock(inode, NULL, 1);
2307		else
2308			ret = ocfs2_inode_lock(inode, NULL, 1);
2309		if (ret < 0) {
2310			if (ret != -EAGAIN)
2311				mlog_errno(ret);
2312			goto out;
2313		}
2314
2315		ocfs2_inode_unlock(inode, 1);
2316	}
2317
2318	ret = generic_write_checks(iocb, from);
2319	if (ret <= 0) {
2320		if (ret)
2321			mlog_errno(ret);
2322		goto out;
2323	}
2324	count = ret;
2325
2326	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2327	if (ret < 0) {
2328		if (ret != -EAGAIN)
2329			mlog_errno(ret);
2330		goto out;
2331	}
2332
2333	if (direct_io && !is_sync_kiocb(iocb) &&
2334	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2335		/*
2336		 * Make it a sync io if it's an unaligned aio.
2337		 */
2338		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2339	}
2340
2341	/* communicate with ocfs2_dio_end_io */
2342	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2343
2344	written = __generic_file_write_iter(iocb, from);
2345	/* buffered aio wouldn't have proper lock coverage today */
2346	BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2347
2348	/*
2349	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2350	 * function pointer which is called when O_DIRECT io completes so that
2351	 * it can unlock our rw lock.
2352	 * Unfortunately there are error cases which call end_io and others
2353	 * that don't, so we don't have to unlock the rw_lock if either an
2354	 * async dio is going to do it in the future or an end_io after an
2355	 * error has already done it.
2356	 */
2357	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2358		rw_level = -1;
2359	}
2360
2361	if (unlikely(written <= 0))
2362		goto out;
2363
2364	if (((file->f_flags & O_DSYNC) && !direct_io) ||
2365	    IS_SYNC(inode)) {
2366		ret = filemap_fdatawrite_range(file->f_mapping,
2367					       iocb->ki_pos - written,
2368					       iocb->ki_pos - 1);
2369		if (ret < 0)
2370			written = ret;
2371
2372		if (!ret) {
2373			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2374			if (ret < 0)
2375				written = ret;
2376		}
2377
2378		if (!ret)
2379			ret = filemap_fdatawait_range(file->f_mapping,
2380						      iocb->ki_pos - written,
2381						      iocb->ki_pos - 1);
2382	}
2383
2384out:
2385	if (saved_ki_complete)
2386		xchg(&iocb->ki_complete, saved_ki_complete);
2387
2388	if (rw_level != -1)
2389		ocfs2_rw_unlock(inode, rw_level);
2390
2391out_mutex:
2392	inode_unlock(inode);
2393
2394	if (written)
2395		ret = written;
2396	return ret;
2397}
2398
2399static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2400				   struct iov_iter *to)
2401{
2402	int ret = 0, rw_level = -1, lock_level = 0;
2403	struct file *filp = iocb->ki_filp;
2404	struct inode *inode = file_inode(filp);
2405	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2406	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2407
2408	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2409			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2410			filp->f_path.dentry->d_name.len,
2411			filp->f_path.dentry->d_name.name,
2412			to->nr_segs);	/* GRRRRR */
2413
2414
2415	if (!inode) {
2416		ret = -EINVAL;
2417		mlog_errno(ret);
2418		goto bail;
2419	}
2420
2421	if (!direct_io && nowait)
2422		return -EOPNOTSUPP;
2423
2424	/*
2425	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
2426	 * need locks to protect pending reads from racing with truncate.
2427	 */
2428	if (direct_io) {
2429		if (nowait)
2430			ret = ocfs2_try_rw_lock(inode, 0);
2431		else
2432			ret = ocfs2_rw_lock(inode, 0);
2433
 
2434		if (ret < 0) {
2435			if (ret != -EAGAIN)
2436				mlog_errno(ret);
2437			goto bail;
2438		}
2439		rw_level = 0;
2440		/* communicate with ocfs2_dio_end_io */
2441		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2442	}
2443
2444	/*
2445	 * We're fine letting folks race truncates and extending
2446	 * writes with read across the cluster, just like they can
2447	 * locally. Hence no rw_lock during read.
2448	 *
2449	 * Take and drop the meta data lock to update inode fields
2450	 * like i_size. This gives the checks down in
2451	 * generic_file_read_iter() a chance of actually working.
2452	 */
2453	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2454				     !nowait);
2455	if (ret < 0) {
2456		if (ret != -EAGAIN)
2457			mlog_errno(ret);
2458		goto bail;
2459	}
2460	ocfs2_inode_unlock(inode, lock_level);
2461
2462	ret = generic_file_read_iter(iocb, to);
2463	trace_generic_file_read_iter_ret(ret);
2464
2465	/* buffered aio wouldn't have proper lock coverage today */
2466	BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2467
2468	/* see ocfs2_file_write_iter */
2469	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2470		rw_level = -1;
 
2471	}
2472
2473bail:
2474	if (rw_level != -1)
2475		ocfs2_rw_unlock(inode, rw_level);
2476
2477	return ret;
2478}
2479
2480/* Refer to generic_file_llseek_unlocked() */
2481static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2482{
2483	struct inode *inode = file->f_mapping->host;
2484	int ret = 0;
2485
2486	inode_lock(inode);
2487
2488	switch (whence) {
2489	case SEEK_SET:
2490		break;
2491	case SEEK_END:
2492		/* SEEK_END requires the OCFS2 inode lock for the file
2493		 * because it references the file's size.
2494		 */
2495		ret = ocfs2_inode_lock(inode, NULL, 0);
2496		if (ret < 0) {
2497			mlog_errno(ret);
2498			goto out;
2499		}
2500		offset += i_size_read(inode);
2501		ocfs2_inode_unlock(inode, 0);
2502		break;
2503	case SEEK_CUR:
2504		if (offset == 0) {
2505			offset = file->f_pos;
2506			goto out;
2507		}
2508		offset += file->f_pos;
2509		break;
2510	case SEEK_DATA:
2511	case SEEK_HOLE:
2512		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2513		if (ret)
2514			goto out;
2515		break;
2516	default:
2517		ret = -EINVAL;
2518		goto out;
2519	}
2520
2521	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2522
2523out:
2524	inode_unlock(inode);
2525	if (ret)
2526		return ret;
2527	return offset;
2528}
2529
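/* Thin wrappers mapping the VFS clone and dedupe hooks onto ocfs2 reflink. */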
2530static int ocfs2_file_clone_range(struct file *file_in,
2531				  loff_t pos_in,
2532				  struct file *file_out,
2533				  loff_t pos_out,
2534				  u64 len)
2535{
2536	return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
2537					 len, false);
2538}
2539
2540static ssize_t ocfs2_file_dedupe_range(struct file *src_file,
2541				       u64 loff,
2542				       u64 len,
2543				       struct file *dst_file,
2544				       u64 dst_loff)
2545{
2546	int error;
2547
2548	error = ocfs2_reflink_remap_range(src_file, loff, dst_file, dst_loff,
2549					  len, true);
2550	if (error)
2551		return error;
2552	return len;
2553}
2554
2555const struct inode_operations ocfs2_file_iops = {
2556	.setattr	= ocfs2_setattr,
2557	.getattr	= ocfs2_getattr,
2558	.permission	= ocfs2_permission,
2559	.listxattr	= ocfs2_listxattr,
 
2560	.fiemap		= ocfs2_fiemap,
2561	.get_acl	= ocfs2_iop_get_acl,
2562	.set_acl	= ocfs2_iop_set_acl,
2563};
2564
2565const struct inode_operations ocfs2_special_file_iops = {
2566	.setattr	= ocfs2_setattr,
2567	.getattr	= ocfs2_getattr,
2568	.permission	= ocfs2_permission,
2569	.get_acl	= ocfs2_iop_get_acl,
2570	.set_acl	= ocfs2_iop_set_acl,
2571};
2572
2573/*
2574 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2575 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2576 */
2577const struct file_operations ocfs2_fops = {
2578	.llseek		= ocfs2_file_llseek,
2579	.mmap		= ocfs2_mmap,
2580	.fsync		= ocfs2_sync_file,
2581	.release	= ocfs2_file_release,
2582	.open		= ocfs2_file_open,
2583	.read_iter	= ocfs2_file_read_iter,
2584	.write_iter	= ocfs2_file_write_iter,
2585	.unlocked_ioctl	= ocfs2_ioctl,
2586#ifdef CONFIG_COMPAT
2587	.compat_ioctl   = ocfs2_compat_ioctl,
2588#endif
2589	.lock		= ocfs2_lock,
2590	.flock		= ocfs2_flock,
2591	.splice_read	= generic_file_splice_read,
2592	.splice_write	= iter_file_splice_write,
2593	.fallocate	= ocfs2_fallocate,
2594	.clone_file_range = ocfs2_file_clone_range,
2595	.dedupe_file_range = ocfs2_file_dedupe_range,
2596};
2597
2598const struct file_operations ocfs2_dops = {
2599	.llseek		= generic_file_llseek,
2600	.read		= generic_read_dir,
2601	.iterate	= ocfs2_readdir,
2602	.fsync		= ocfs2_sync_file,
2603	.release	= ocfs2_dir_release,
2604	.open		= ocfs2_dir_open,
2605	.unlocked_ioctl	= ocfs2_ioctl,
2606#ifdef CONFIG_COMPAT
2607	.compat_ioctl   = ocfs2_compat_ioctl,
2608#endif
2609	.lock		= ocfs2_lock,
2610	.flock		= ocfs2_flock,
2611};
2612
2613/*
2614 * POSIX-lockless variants of our file_operations.
2615 *
2616 * These will be used if the underlying cluster stack does not support
2617 * posix file locking, if the user passes the "localflocks" mount
2618 * option, or if we have a local-only fs.
2619 *
2620 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2621 * so we still want it in the case of no stack support for
2622 * plocks. Internally, it will do the right thing when asked to ignore
2623 * the cluster.
2624 */
2625const struct file_operations ocfs2_fops_no_plocks = {
2626	.llseek		= ocfs2_file_llseek,
2627	.mmap		= ocfs2_mmap,
2628	.fsync		= ocfs2_sync_file,
2629	.release	= ocfs2_file_release,
2630	.open		= ocfs2_file_open,
2631	.read_iter	= ocfs2_file_read_iter,
2632	.write_iter	= ocfs2_file_write_iter,
2633	.unlocked_ioctl	= ocfs2_ioctl,
2634#ifdef CONFIG_COMPAT
2635	.compat_ioctl   = ocfs2_compat_ioctl,
2636#endif
2637	.flock		= ocfs2_flock,
2638	.splice_read	= generic_file_splice_read,
2639	.splice_write	= iter_file_splice_write,
2640	.fallocate	= ocfs2_fallocate,
2641	.clone_file_range = ocfs2_file_clone_range,
2642	.dedupe_file_range = ocfs2_file_dedupe_range,
2643};
2644
2645const struct file_operations ocfs2_dops_no_plocks = {
2646	.llseek		= generic_file_llseek,
2647	.read		= generic_read_dir,
2648	.iterate	= ocfs2_readdir,
2649	.fsync		= ocfs2_sync_file,
2650	.release	= ocfs2_dir_release,
2651	.open		= ocfs2_dir_open,
2652	.unlocked_ioctl	= ocfs2_ioctl,
2653#ifdef CONFIG_COMPAT
2654	.compat_ioctl   = ocfs2_compat_ioctl,
2655#endif
2656	.flock		= ocfs2_flock,
2657};