   1/* -*- mode: c; c-basic-offset: 8; -*-
   2 * vim: noexpandtab sw=8 ts=8 sts=0:
   3 *
   4 * file.c
   5 *
   6 * File open, close, extend, truncate
   7 *
   8 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public
  12 * License as published by the Free Software Foundation; either
  13 * version 2 of the License, or (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public
  21 * License along with this program; if not, write to the
  22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23 * Boston, MA 02111-1307, USA.
  24 */
  25
  26#include <linux/capability.h>
  27#include <linux/fs.h>
  28#include <linux/types.h>
  29#include <linux/slab.h>
  30#include <linux/highmem.h>
  31#include <linux/pagemap.h>
  32#include <linux/uio.h>
  33#include <linux/sched.h>
  34#include <linux/splice.h>
  35#include <linux/mount.h>
  36#include <linux/writeback.h>
  37#include <linux/falloc.h>
  38#include <linux/quotaops.h>
  39#include <linux/blkdev.h>
  40
  41#include <cluster/masklog.h>
  42
  43#include "ocfs2.h"
  44
  45#include "alloc.h"
  46#include "aops.h"
  47#include "dir.h"
  48#include "dlmglue.h"
  49#include "extent_map.h"
  50#include "file.h"
  51#include "sysfile.h"
  52#include "inode.h"
  53#include "ioctl.h"
  54#include "journal.h"
  55#include "locks.h"
  56#include "mmap.h"
  57#include "suballoc.h"
  58#include "super.h"
  59#include "xattr.h"
  60#include "acl.h"
  61#include "quota.h"
  62#include "refcounttree.h"
  63#include "ocfs2_trace.h"
  64
  65#include "buffer_head_io.h"
  66
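/*
 * Per-open-file state: a back-pointer to the struct file, a mutex, and
 * the cluster lock resource that backs ocfs2's flock() support.
 * ocfs2_free_file_private() below drops and frees that lock resource
 * on ->release().
 */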
  67static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  68{
  69	struct ocfs2_file_private *fp;
  70
  71	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  72	if (!fp)
  73		return -ENOMEM;
  74
  75	fp->fp_file = file;
  76	mutex_init(&fp->fp_mutex);
  77	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  78	file->private_data = fp;
  79
  80	return 0;
  81}
  82
  83static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  84{
  85	struct ocfs2_file_private *fp = file->private_data;
  86	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  87
  88	if (fp) {
  89		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  90		ocfs2_lock_res_free(&fp->fp_flock);
  91		kfree(fp);
  92		file->private_data = NULL;
  93	}
  94}
  95
  96static int ocfs2_file_open(struct inode *inode, struct file *file)
  97{
  98	int status;
  99	int mode = file->f_flags;
 100	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 101
 102	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
 103			      (unsigned long long)OCFS2_I(inode)->ip_blkno,
 104			      file->f_path.dentry->d_name.len,
 105			      file->f_path.dentry->d_name.name, mode);
 106
 107	if (file->f_mode & FMODE_WRITE)
 108		dquot_initialize(inode);
 109
 110	spin_lock(&oi->ip_lock);
 111
 112	/* Check that the inode hasn't been wiped from disk by another
 113	 * node. If it hasn't then we're safe as long as we hold the
 114	 * spin lock until our increment of open count. */
 115	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
 116		spin_unlock(&oi->ip_lock);
 117
 118		status = -ENOENT;
 119		goto leave;
 120	}
 121
 122	if (mode & O_DIRECT)
 123		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 124
 125	oi->ip_open_count++;
 126	spin_unlock(&oi->ip_lock);
 127
 128	status = ocfs2_init_file_private(inode, file);
 129	if (status) {
 130		/*
 131		 * We want to set open count back if we're failing the
 132		 * open.
 133		 */
 134		spin_lock(&oi->ip_lock);
 135		oi->ip_open_count--;
 136		spin_unlock(&oi->ip_lock);
 137	}
 138
 139leave:
 140	return status;
 141}
 142
 143static int ocfs2_file_release(struct inode *inode, struct file *file)
 144{
 145	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 146
 147	spin_lock(&oi->ip_lock);
 148	if (!--oi->ip_open_count)
 149		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 150
 151	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 152				 oi->ip_blkno,
 153				 file->f_path.dentry->d_name.len,
 154				 file->f_path.dentry->d_name.name,
 155				 oi->ip_open_count);
 156	spin_unlock(&oi->ip_lock);
 157
 158	ocfs2_free_file_private(inode, file);
 159
 160	return 0;
 161}
 162
 163static int ocfs2_dir_open(struct inode *inode, struct file *file)
 164{
 165	return ocfs2_init_file_private(inode, file);
 166}
 167
 168static int ocfs2_dir_release(struct inode *inode, struct file *file)
 169{
 170	ocfs2_free_file_private(inode, file);
 171	return 0;
 172}
 173
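/*
 * fsync/fdatasync: write back and wait on the dirty pages in the given
 * range, then either issue a block-device cache flush (fdatasync with
 * no dirty metadata, when barriers are enabled) or force a jbd2
 * journal commit so the metadata reaches disk as well.
 */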
 174static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 175			   int datasync)
 176{
 177	int err = 0;
 178	journal_t *journal;
 179	struct inode *inode = file->f_mapping->host;
 180	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 181
 182	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 183			      OCFS2_I(inode)->ip_blkno,
 184			      file->f_path.dentry->d_name.len,
 185			      file->f_path.dentry->d_name.name,
 186			      (unsigned long long)datasync);
 187
 188	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 189	if (err)
 190		return err;
 191
 192	/*
 193	 * Probably don't need the i_mutex at all in here, just putting it here
 194	 * to be consistent with how fsync used to be called, someone more
 195	 * familiar with the fs could possibly remove it.
 196	 */
 197	mutex_lock(&inode->i_mutex);
 198	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
 199		/*
 200		 * We still have to flush drive's caches to get data to the
 201		 * platter
 202		 */
 203		if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
 204			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 205		goto bail;
 206	}
 207
 208	journal = osb->journal->j_journal;
 209	err = jbd2_journal_force_commit(journal);
 210
 211bail:
 212	if (err)
 213		mlog_errno(err);
 214	mutex_unlock(&inode->i_mutex);
 215
 216	return (err < 0) ? -EIO : 0;
 217}
 218
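/*
 * Decide whether an access should update the on-disk atime.  This
 * honours the usual noatime/nodiratime/relatime flags and, in the
 * default case, the ocfs2 atime quantum (osb->s_atime_quantum), which
 * batches atime updates to at most one per quantum.
 */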
 219int ocfs2_should_update_atime(struct inode *inode,
 220			      struct vfsmount *vfsmnt)
 221{
 222	struct timespec now;
 223	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 224
 225	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 226		return 0;
 227
 228	if ((inode->i_flags & S_NOATIME) ||
 229	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
 230		return 0;
 231
 232	/*
 233	 * We can be called with no vfsmnt structure - NFSD will
 234	 * sometimes do this.
 235	 *
 236	 * Note that our action here is different than touch_atime() -
 237	 * if we can't tell whether this is a noatime mount, then we
 238	 * don't know whether to trust the value of s_atime_quantum.
 239	 */
 240	if (vfsmnt == NULL)
 241		return 0;
 242
 243	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 244	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 245		return 0;
 246
 247	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 248		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
 249		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
 250			return 1;
 251
 252		return 0;
 253	}
 254
 255	now = CURRENT_TIME;
 256	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
 257		return 0;
 258	else
 259		return 1;
 260}
 261
 262int ocfs2_update_inode_atime(struct inode *inode,
 263			     struct buffer_head *bh)
 264{
 265	int ret;
 266	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 267	handle_t *handle;
 268	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 269
 270	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 271	if (IS_ERR(handle)) {
 272		ret = PTR_ERR(handle);
 273		mlog_errno(ret);
 274		goto out;
 275	}
 276
 277	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 278				      OCFS2_JOURNAL_ACCESS_WRITE);
 279	if (ret) {
 280		mlog_errno(ret);
 281		goto out_commit;
 282	}
 283
 284	/*
 285	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 286	 * have i_mutex to guard against concurrent changes to other
 287	 * inode fields.
 288	 */
 289	inode->i_atime = CURRENT_TIME;
 290	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
 291	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 292	ocfs2_journal_dirty(handle, bh);
 293
 294out_commit:
 295	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 296out:
 297	return ret;
 298}
 299
 300static int ocfs2_set_inode_size(handle_t *handle,
 301				struct inode *inode,
 302				struct buffer_head *fe_bh,
 303				u64 new_i_size)
 304{
 305	int status;
 306
 307	i_size_write(inode, new_i_size);
 308	inode->i_blocks = ocfs2_inode_sector_count(inode);
 309	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 310
 311	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 312	if (status < 0) {
 313		mlog_errno(status);
 314		goto bail;
 315	}
 316
 317bail:
 318	return status;
 319}
 320
 321int ocfs2_simple_size_update(struct inode *inode,
 322			     struct buffer_head *di_bh,
 323			     u64 new_i_size)
 324{
 325	int ret;
 326	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 327	handle_t *handle = NULL;
 328
 329	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 330	if (IS_ERR(handle)) {
 331		ret = PTR_ERR(handle);
 332		mlog_errno(ret);
 333		goto out;
 334	}
 335
 336	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 337				   new_i_size);
 338	if (ret < 0)
 339		mlog_errno(ret);
 340
 341	ocfs2_commit_trans(osb, handle);
 342out:
 343	return ret;
 344}
 345
 346static int ocfs2_cow_file_pos(struct inode *inode,
 347			      struct buffer_head *fe_bh,
 348			      u64 offset)
 349{
 350	int status;
 351	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 352	unsigned int num_clusters = 0;
 353	unsigned int ext_flags = 0;
 354
 355	/*
 356	 * If the new offset is aligned to the range of the cluster, there is
 357	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 358	 * CoW either.
 359	 */
 360	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 361		return 0;
 362
 363	status = ocfs2_get_clusters(inode, cpos, &phys,
 364				    &num_clusters, &ext_flags);
 365	if (status) {
 366		mlog_errno(status);
 367		goto out;
 368	}
 369
 370	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 371		goto out;
 372
 373	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
 374
 375out:
 376	return status;
 377}
 378
 379static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 380				     struct inode *inode,
 381				     struct buffer_head *fe_bh,
 382				     u64 new_i_size)
 383{
 384	int status;
 385	handle_t *handle;
 386	struct ocfs2_dinode *di;
 387	u64 cluster_bytes;
 388
 389	/*
  390	 * We need to CoW the cluster that contains the offset if it is
  391	 * reflinked, since ocfs2_zero_range_for_truncate will later write
  392	 * zeros from the offset to the end of that cluster.
 393	 */
 394	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 395	if (status) {
 396		mlog_errno(status);
 397		return status;
 398	}
 399
 400	/* TODO: This needs to actually orphan the inode in this
 401	 * transaction. */
 402
 403	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 404	if (IS_ERR(handle)) {
 405		status = PTR_ERR(handle);
 406		mlog_errno(status);
 407		goto out;
 408	}
 409
 410	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 411					 OCFS2_JOURNAL_ACCESS_WRITE);
 412	if (status < 0) {
 413		mlog_errno(status);
 414		goto out_commit;
 415	}
 416
 417	/*
 418	 * Do this before setting i_size.
 419	 */
 420	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 421	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 422					       cluster_bytes);
 423	if (status) {
 424		mlog_errno(status);
 425		goto out_commit;
 426	}
 427
 428	i_size_write(inode, new_i_size);
 429	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 430
 431	di = (struct ocfs2_dinode *) fe_bh->b_data;
 432	di->i_size = cpu_to_le64(new_i_size);
 433	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
 434	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 435
 436	ocfs2_journal_dirty(handle, fe_bh);
 437
 438out_commit:
 439	ocfs2_commit_trans(osb, handle);
 440out:
 441	return status;
 442}
 443
 444static int ocfs2_truncate_file(struct inode *inode,
 445			       struct buffer_head *di_bh,
 446			       u64 new_i_size)
 447{
 448	int status = 0;
 449	struct ocfs2_dinode *fe = NULL;
 450	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 451
 452	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 453	 * already validated it */
 454	fe = (struct ocfs2_dinode *) di_bh->b_data;
 455
 456	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 457				  (unsigned long long)le64_to_cpu(fe->i_size),
 458				  (unsigned long long)new_i_size);
 459
 460	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 461			"Inode %llu, inode i_size = %lld != di "
 462			"i_size = %llu, i_flags = 0x%x\n",
 463			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 464			i_size_read(inode),
 465			(unsigned long long)le64_to_cpu(fe->i_size),
 466			le32_to_cpu(fe->i_flags));
 467
 468	if (new_i_size > le64_to_cpu(fe->i_size)) {
 469		trace_ocfs2_truncate_file_error(
 470			(unsigned long long)le64_to_cpu(fe->i_size),
 471			(unsigned long long)new_i_size);
 472		status = -EINVAL;
 473		mlog_errno(status);
 474		goto bail;
 475	}
 476
  477	/* let's handle the simple truncate cases before doing any more
 478	 * cluster locking. */
 479	if (new_i_size == le64_to_cpu(fe->i_size))
 480		goto bail;
 481
 482	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 483
 484	ocfs2_resv_discard(&osb->osb_la_resmap,
 485			   &OCFS2_I(inode)->ip_la_data_resv);
 486
 487	/*
 488	 * The inode lock forced other nodes to sync and drop their
 489	 * pages, which (correctly) happens even if we have a truncate
 490	 * without allocation change - ocfs2 cluster sizes can be much
 491	 * greater than page size, so we have to truncate them
 492	 * anyway.
 493	 */
 494	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 495	truncate_inode_pages(inode->i_mapping, new_i_size);
 496
 497	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 498		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 499					       i_size_read(inode), 1);
 500		if (status)
 501			mlog_errno(status);
 502
 503		goto bail_unlock_sem;
 504	}
 505
 506	/* alright, we're going to need to do a full blown alloc size
 507	 * change. Orphan the inode so that recovery can complete the
 508	 * truncate if necessary. This does the task of marking
 509	 * i_size. */
 510	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 511	if (status < 0) {
 512		mlog_errno(status);
 513		goto bail_unlock_sem;
 514	}
 515
 516	status = ocfs2_commit_truncate(osb, inode, di_bh);
 517	if (status < 0) {
 518		mlog_errno(status);
 519		goto bail_unlock_sem;
 520	}
 521
 522	/* TODO: orphan dir cleanup here. */
 523bail_unlock_sem:
 524	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 525
 526bail:
 527	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 528		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 529
 530	return status;
 531}
 532
 533/*
 534 * extend file allocation only here.
 535 * we'll update all the disk stuff, and oip->alloc_size
 536 *
 537 * expect stuff to be locked, a transaction started and enough data /
 538 * metadata reservations in the contexts.
 539 *
 540 * Will return -EAGAIN, and a reason if a restart is needed.
 541 * If passed in, *reason will always be set, even in error.
 542 */
 543int ocfs2_add_inode_data(struct ocfs2_super *osb,
 544			 struct inode *inode,
 545			 u32 *logical_offset,
 546			 u32 clusters_to_add,
 547			 int mark_unwritten,
 548			 struct buffer_head *fe_bh,
 549			 handle_t *handle,
 550			 struct ocfs2_alloc_context *data_ac,
 551			 struct ocfs2_alloc_context *meta_ac,
 552			 enum ocfs2_alloc_restarted *reason_ret)
 553{
 554	int ret;
 555	struct ocfs2_extent_tree et;
 556
 557	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 558	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 559					  clusters_to_add, mark_unwritten,
 560					  data_ac, meta_ac, reason_ret);
 561
 562	return ret;
 563}
 564
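/*
 * Grow the inode's allocation by clusters_to_add clusters starting at
 * logical_start.  This reserves quota and allocator contexts, adds
 * extents within a single transaction, and when the allocators or
 * journal credits run out (RESTART_META / RESTART_TRANS) it either
 * extends the transaction or restarts the whole sequence from
 * restart_all.
 */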
 565static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 566				     u32 clusters_to_add, int mark_unwritten)
 567{
 568	int status = 0;
 569	int restart_func = 0;
 570	int credits;
 571	u32 prev_clusters;
 572	struct buffer_head *bh = NULL;
 573	struct ocfs2_dinode *fe = NULL;
 574	handle_t *handle = NULL;
 575	struct ocfs2_alloc_context *data_ac = NULL;
 576	struct ocfs2_alloc_context *meta_ac = NULL;
 577	enum ocfs2_alloc_restarted why;
 578	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 579	struct ocfs2_extent_tree et;
 580	int did_quota = 0;
 581
 582	/*
 583	 * This function only exists for file systems which don't
 584	 * support holes.
 585	 */
 586	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 587
 588	status = ocfs2_read_inode_block(inode, &bh);
 589	if (status < 0) {
 590		mlog_errno(status);
 591		goto leave;
 592	}
 593	fe = (struct ocfs2_dinode *) bh->b_data;
 594
 595restart_all:
 596	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 597
 598	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 599	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 600				       &data_ac, &meta_ac);
 601	if (status) {
 602		mlog_errno(status);
 603		goto leave;
 604	}
 605
 606	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
 607					    clusters_to_add);
 608	handle = ocfs2_start_trans(osb, credits);
 609	if (IS_ERR(handle)) {
 610		status = PTR_ERR(handle);
 611		handle = NULL;
 612		mlog_errno(status);
 613		goto leave;
 614	}
 615
 616restarted_transaction:
 617	trace_ocfs2_extend_allocation(
 618		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 619		(unsigned long long)i_size_read(inode),
 620		le32_to_cpu(fe->i_clusters), clusters_to_add,
 621		why, restart_func);
 622
 623	status = dquot_alloc_space_nodirty(inode,
 624			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 625	if (status)
 626		goto leave;
 627	did_quota = 1;
 628
  629	/* reserve a write to the file entry early on - that way if we
 630	 * run out of credits in the allocation path, we can still
 631	 * update i_size. */
 632	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 633					 OCFS2_JOURNAL_ACCESS_WRITE);
 634	if (status < 0) {
 635		mlog_errno(status);
 636		goto leave;
 637	}
 638
 639	prev_clusters = OCFS2_I(inode)->ip_clusters;
 640
 641	status = ocfs2_add_inode_data(osb,
 642				      inode,
 643				      &logical_start,
 644				      clusters_to_add,
 645				      mark_unwritten,
 646				      bh,
 647				      handle,
 648				      data_ac,
 649				      meta_ac,
 650				      &why);
 651	if ((status < 0) && (status != -EAGAIN)) {
 652		if (status != -ENOSPC)
 653			mlog_errno(status);
 654		goto leave;
 655	}
 656
 657	ocfs2_journal_dirty(handle, bh);
 658
 659	spin_lock(&OCFS2_I(inode)->ip_lock);
 660	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 661	spin_unlock(&OCFS2_I(inode)->ip_lock);
 662	/* Release unused quota reservation */
 663	dquot_free_space(inode,
 664			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 665	did_quota = 0;
 666
 667	if (why != RESTART_NONE && clusters_to_add) {
 668		if (why == RESTART_META) {
 669			restart_func = 1;
 670			status = 0;
 671		} else {
 672			BUG_ON(why != RESTART_TRANS);
 673
 674			/* TODO: This can be more intelligent. */
 675			credits = ocfs2_calc_extend_credits(osb->sb,
 676							    &fe->id2.i_list,
 677							    clusters_to_add);
 678			status = ocfs2_extend_trans(handle, credits);
 679			if (status < 0) {
 680				/* handle still has to be committed at
 681				 * this point. */
 682				status = -ENOMEM;
 683				mlog_errno(status);
 684				goto leave;
 685			}
 686			goto restarted_transaction;
 687		}
 688	}
 689
 690	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 691	     le32_to_cpu(fe->i_clusters),
 692	     (unsigned long long)le64_to_cpu(fe->i_size),
 693	     OCFS2_I(inode)->ip_clusters,
 694	     (unsigned long long)i_size_read(inode));
 695
 696leave:
 697	if (status < 0 && did_quota)
 698		dquot_free_space(inode,
 699			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 700	if (handle) {
 701		ocfs2_commit_trans(osb, handle);
 702		handle = NULL;
 703	}
 704	if (data_ac) {
 705		ocfs2_free_alloc_context(data_ac);
 706		data_ac = NULL;
 707	}
 708	if (meta_ac) {
 709		ocfs2_free_alloc_context(meta_ac);
 710		meta_ac = NULL;
 711	}
 712	if ((!status) && restart_func) {
 713		restart_func = 0;
 714		goto restart_all;
 715	}
 716	brelse(bh);
 717	bh = NULL;
 718
 719	return status;
 720}
 721
 722/*
 723 * While a write will already be ordering the data, a truncate will not.
 724 * Thus, we need to explicitly order the zeroed pages.
 725 */
 726static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
 727{
 728	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 729	handle_t *handle = NULL;
 730	int ret = 0;
 731
 732	if (!ocfs2_should_order_data(inode))
 733		goto out;
 734
 735	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 736	if (IS_ERR(handle)) {
 737		ret = -ENOMEM;
 738		mlog_errno(ret);
 739		goto out;
 740	}
 741
 742	ret = ocfs2_jbd2_file_inode(handle, inode);
 743	if (ret < 0)
 744		mlog_errno(ret);
 745
 746out:
 747	if (ret) {
 748		if (!IS_ERR(handle))
 749			ocfs2_commit_trans(osb, handle);
 750		handle = ERR_PTR(ret);
 751	}
 752	return handle;
 753}
 754
 755/* Some parts of this taken from generic_cont_expand, which turned out
 756 * to be too fragile to do exactly what we need without us having to
 757 * worry about recursive locking in ->write_begin() and ->write_end(). */
 758static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 759				 u64 abs_to)
 760{
 761	struct address_space *mapping = inode->i_mapping;
 762	struct page *page;
 763	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
 764	handle_t *handle = NULL;
 765	int ret = 0;
 766	unsigned zero_from, zero_to, block_start, block_end;
 767
 768	BUG_ON(abs_from >= abs_to);
 769	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
 770	BUG_ON(abs_from & (inode->i_blkbits - 1));
 771
 772	page = find_or_create_page(mapping, index, GFP_NOFS);
 773	if (!page) {
 774		ret = -ENOMEM;
 775		mlog_errno(ret);
 776		goto out;
 777	}
 778
 779	/* Get the offsets within the page that we want to zero */
 780	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
 781	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
 782	if (!zero_to)
 783		zero_to = PAGE_CACHE_SIZE;
 784
 785	trace_ocfs2_write_zero_page(
 786			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 787			(unsigned long long)abs_from,
 788			(unsigned long long)abs_to,
 789			index, zero_from, zero_to);
 790
 791	/* We know that zero_from is block aligned */
 792	for (block_start = zero_from; block_start < zero_to;
 793	     block_start = block_end) {
 794		block_end = block_start + (1 << inode->i_blkbits);
 795
 796		/*
 797		 * block_start is block-aligned.  Bump it by one to force
 798		 * __block_write_begin and block_commit_write to zero the
 799		 * whole block.
 800		 */
 801		ret = __block_write_begin(page, block_start + 1, 0,
 802					  ocfs2_get_block);
 803		if (ret < 0) {
 804			mlog_errno(ret);
 805			goto out_unlock;
 806		}
 807
 808		if (!handle) {
 809			handle = ocfs2_zero_start_ordered_transaction(inode);
 810			if (IS_ERR(handle)) {
 811				ret = PTR_ERR(handle);
 812				handle = NULL;
 813				break;
 814			}
 815		}
 816
 817		/* must not update i_size! */
 818		ret = block_commit_write(page, block_start + 1,
 819					 block_start + 1);
 820		if (ret < 0)
 821			mlog_errno(ret);
 822		else
 823			ret = 0;
 824	}
 825
 826	if (handle)
 827		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 828
 829out_unlock:
 830	unlock_page(page);
 831	page_cache_release(page);
 832out:
 833	return ret;
 834}
 835
 836/*
 837 * Find the next range to zero.  We do this in terms of bytes because
 838 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 839 * pagecache.  We may return multiple extents.
 840 *
  841 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 842 * needs to be zeroed.  range_start and range_end return the next zeroing
 843 * range.  A subsequent call should pass the previous range_end as its
 844 * zero_start.  If range_end is 0, there's nothing to do.
 845 *
  846 * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
 847 */
 848static int ocfs2_zero_extend_get_range(struct inode *inode,
 849				       struct buffer_head *di_bh,
 850				       u64 zero_start, u64 zero_end,
 851				       u64 *range_start, u64 *range_end)
 852{
 853	int rc = 0, needs_cow = 0;
 854	u32 p_cpos, zero_clusters = 0;
 855	u32 zero_cpos =
 856		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 857	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 858	unsigned int num_clusters = 0;
 859	unsigned int ext_flags = 0;
 860
 861	while (zero_cpos < last_cpos) {
 862		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 863					&num_clusters, &ext_flags);
 864		if (rc) {
 865			mlog_errno(rc);
 866			goto out;
 867		}
 868
 869		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 870			zero_clusters = num_clusters;
 871			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 872				needs_cow = 1;
 873			break;
 874		}
 875
 876		zero_cpos += num_clusters;
 877	}
 878	if (!zero_clusters) {
 879		*range_end = 0;
 880		goto out;
 881	}
 882
 883	while ((zero_cpos + zero_clusters) < last_cpos) {
 884		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 885					&p_cpos, &num_clusters,
 886					&ext_flags);
 887		if (rc) {
 888			mlog_errno(rc);
 889			goto out;
 890		}
 891
 892		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 893			break;
 894		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 895			needs_cow = 1;
 896		zero_clusters += num_clusters;
 897	}
 898	if ((zero_cpos + zero_clusters) > last_cpos)
 899		zero_clusters = last_cpos - zero_cpos;
 900
 901	if (needs_cow) {
 902		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
 903					zero_clusters, UINT_MAX);
 904		if (rc) {
 905			mlog_errno(rc);
 906			goto out;
 907		}
 908	}
 909
 910	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 911	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 912					     zero_cpos + zero_clusters);
 913
 914out:
 915	return rc;
 916}
 917
 918/*
 919 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 920 * has made sure that the entire range needs zeroing.
 921 */
 922static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 923				   u64 range_end)
 924{
 925	int rc = 0;
 926	u64 next_pos;
 927	u64 zero_pos = range_start;
 928
 929	trace_ocfs2_zero_extend_range(
 930			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 931			(unsigned long long)range_start,
 932			(unsigned long long)range_end);
 933	BUG_ON(range_start >= range_end);
 934
 935	while (zero_pos < range_end) {
 936		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
 937		if (next_pos > range_end)
 938			next_pos = range_end;
 939		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
 940		if (rc < 0) {
 941			mlog_errno(rc);
 942			break;
 943		}
 944		zero_pos = next_pos;
 945
 946		/*
 947		 * Very large extends have the potential to lock up
 948		 * the cpu for extended periods of time.
 949		 */
 950		cond_resched();
 951	}
 952
 953	return rc;
 954}
 955
 956int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 957		      loff_t zero_to_size)
 958{
 959	int ret = 0;
 960	u64 zero_start, range_start = 0, range_end = 0;
 961	struct super_block *sb = inode->i_sb;
 962
 963	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 964	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 965				(unsigned long long)zero_start,
 966				(unsigned long long)i_size_read(inode));
 967	while (zero_start < zero_to_size) {
 968		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 969						  zero_to_size,
 970						  &range_start,
 971						  &range_end);
 972		if (ret) {
 973			mlog_errno(ret);
 974			break;
 975		}
 976		if (!range_end)
 977			break;
 978		/* Trim the ends */
 979		if (range_start < zero_start)
 980			range_start = zero_start;
 981		if (range_end > zero_to_size)
 982			range_end = zero_to_size;
 983
 984		ret = ocfs2_zero_extend_range(inode, range_start,
 985					      range_end);
 986		if (ret) {
 987			mlog_errno(ret);
 988			break;
 989		}
 990		zero_start = range_end;
 991	}
 992
 993	return ret;
 994}
 995
 996int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
 997			  u64 new_i_size, u64 zero_to)
 998{
 999	int ret;
1000	u32 clusters_to_add;
1001	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1002
1003	/*
1004	 * Only quota files call this without a bh, and they can't be
1005	 * refcounted.
1006	 */
1007	BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
1008	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1009
1010	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1011	if (clusters_to_add < oi->ip_clusters)
1012		clusters_to_add = 0;
1013	else
1014		clusters_to_add -= oi->ip_clusters;
1015
1016	if (clusters_to_add) {
1017		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
1018						clusters_to_add, 0);
1019		if (ret) {
1020			mlog_errno(ret);
1021			goto out;
1022		}
1023	}
1024
1025	/*
1026	 * Call this even if we don't add any clusters to the tree. We
1027	 * still need to zero the area between the old i_size and the
1028	 * new i_size.
1029	 */
1030	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1031	if (ret < 0)
1032		mlog_errno(ret);
1033
1034out:
1035	return ret;
1036}
1037
1038static int ocfs2_extend_file(struct inode *inode,
1039			     struct buffer_head *di_bh,
1040			     u64 new_i_size)
1041{
1042	int ret = 0;
1043	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1044
1045	BUG_ON(!di_bh);
1046
1047	/* setattr sometimes calls us like this. */
1048	if (new_i_size == 0)
1049		goto out;
1050
1051	if (i_size_read(inode) == new_i_size)
1052		goto out;
1053	BUG_ON(new_i_size < i_size_read(inode));
1054
1055	/*
1056	 * The alloc sem blocks people in read/write from reading our
1057	 * allocation until we're done changing it. We depend on
1058	 * i_mutex to block other extend/truncate calls while we're
1059	 * here.  We even have to hold it for sparse files because there
1060	 * might be some tail zeroing.
1061	 */
1062	down_write(&oi->ip_alloc_sem);
1063
1064	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1065		/*
 1066		 * We can optimize small extends by keeping the inode's
1067		 * inline data.
1068		 */
1069		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1070			up_write(&oi->ip_alloc_sem);
1071			goto out_update_size;
1072		}
1073
1074		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1075		if (ret) {
1076			up_write(&oi->ip_alloc_sem);
1077			mlog_errno(ret);
1078			goto out;
1079		}
1080	}
1081
1082	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1083		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1084	else
1085		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1086					    new_i_size);
1087
1088	up_write(&oi->ip_alloc_sem);
1089
1090	if (ret < 0) {
1091		mlog_errno(ret);
1092		goto out;
1093	}
1094
1095out_update_size:
1096	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1097	if (ret < 0)
1098		mlog_errno(ret);
1099
1100out:
1101	return ret;
1102}
1103
1104int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1105{
1106	int status = 0, size_change;
1107	struct inode *inode = dentry->d_inode;
1108	struct super_block *sb = inode->i_sb;
1109	struct ocfs2_super *osb = OCFS2_SB(sb);
1110	struct buffer_head *bh = NULL;
1111	handle_t *handle = NULL;
1112	struct dquot *transfer_to[MAXQUOTAS] = { };
1113	int qtype;
1114
1115	trace_ocfs2_setattr(inode, dentry,
1116			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1117			    dentry->d_name.len, dentry->d_name.name,
1118			    attr->ia_valid, attr->ia_mode,
1119			    attr->ia_uid, attr->ia_gid);
1120
1121	/* ensuring we don't even attempt to truncate a symlink */
1122	if (S_ISLNK(inode->i_mode))
1123		attr->ia_valid &= ~ATTR_SIZE;
1124
1125#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1126			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1127	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1128		return 0;
1129
1130	status = inode_change_ok(inode, attr);
1131	if (status)
1132		return status;
1133
1134	if (is_quota_modification(inode, attr))
1135		dquot_initialize(inode);
1136	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1137	if (size_change) {
1138		status = ocfs2_rw_lock(inode, 1);
1139		if (status < 0) {
1140			mlog_errno(status);
1141			goto bail;
1142		}
1143	}
1144
1145	status = ocfs2_inode_lock(inode, &bh, 1);
1146	if (status < 0) {
1147		if (status != -ENOENT)
1148			mlog_errno(status);
1149		goto bail_unlock_rw;
1150	}
1151
1152	if (size_change && attr->ia_size != i_size_read(inode)) {
1153		status = inode_newsize_ok(inode, attr->ia_size);
1154		if (status)
1155			goto bail_unlock;
1156
1157		inode_dio_wait(inode);
1158
1159		if (i_size_read(inode) > attr->ia_size) {
1160			if (ocfs2_should_order_data(inode)) {
1161				status = ocfs2_begin_ordered_truncate(inode,
1162								      attr->ia_size);
1163				if (status)
1164					goto bail_unlock;
1165			}
1166			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1167		} else
1168			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1169		if (status < 0) {
1170			if (status != -ENOSPC)
1171				mlog_errno(status);
1172			status = -ENOSPC;
1173			goto bail_unlock;
1174		}
1175	}
1176
1177	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
1178	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
1179		/*
1180		 * Gather pointers to quota structures so that allocation /
1181		 * freeing of quota structures happens here and not inside
1182		 * dquot_transfer() where we have problems with lock ordering
1183		 */
1184		if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
1185		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1186		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1187			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
1188						      USRQUOTA);
1189			if (!transfer_to[USRQUOTA]) {
1190				status = -ESRCH;
1191				goto bail_unlock;
1192			}
1193		}
1194		if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
1195		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1196		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1197			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
1198						      GRPQUOTA);
1199			if (!transfer_to[GRPQUOTA]) {
1200				status = -ESRCH;
1201				goto bail_unlock;
1202			}
1203		}
1204		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1205					   2 * ocfs2_quota_trans_credits(sb));
1206		if (IS_ERR(handle)) {
1207			status = PTR_ERR(handle);
1208			mlog_errno(status);
1209			goto bail_unlock;
1210		}
1211		status = __dquot_transfer(inode, transfer_to);
1212		if (status < 0)
1213			goto bail_commit;
1214	} else {
1215		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1216		if (IS_ERR(handle)) {
1217			status = PTR_ERR(handle);
1218			mlog_errno(status);
1219			goto bail_unlock;
1220		}
1221	}
1222
1223	/*
1224	 * This will intentionally not wind up calling truncate_setsize(),
1225	 * since all the work for a size change has been done above.
1226	 * Otherwise, we could get into problems with truncate as
1227	 * ip_alloc_sem is used there to protect against i_size
1228	 * changes.
1229	 *
1230	 * XXX: this means the conditional below can probably be removed.
1231	 */
1232	if ((attr->ia_valid & ATTR_SIZE) &&
1233	    attr->ia_size != i_size_read(inode)) {
1234		status = vmtruncate(inode, attr->ia_size);
1235		if (status) {
1236			mlog_errno(status);
1237			goto bail_commit;
1238		}
1239	}
1240
1241	setattr_copy(inode, attr);
1242	mark_inode_dirty(inode);
1243
1244	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1245	if (status < 0)
1246		mlog_errno(status);
1247
1248bail_commit:
1249	ocfs2_commit_trans(osb, handle);
1250bail_unlock:
1251	ocfs2_inode_unlock(inode, 1);
1252bail_unlock_rw:
1253	if (size_change)
1254		ocfs2_rw_unlock(inode, 1);
1255bail:
1256	brelse(bh);
1257
1258	/* Release quota pointers in case we acquired them */
1259	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
1260		dqput(transfer_to[qtype]);
1261
1262	if (!status && attr->ia_valid & ATTR_MODE) {
1263		status = ocfs2_acl_chmod(inode);
1264		if (status < 0)
1265			mlog_errno(status);
1266	}
1267
1268	return status;
1269}
1270
1271int ocfs2_getattr(struct vfsmount *mnt,
1272		  struct dentry *dentry,
1273		  struct kstat *stat)
1274{
1275	struct inode *inode = dentry->d_inode;
1276	struct super_block *sb = dentry->d_inode->i_sb;
1277	struct ocfs2_super *osb = sb->s_fs_info;
1278	int err;
1279
1280	err = ocfs2_inode_revalidate(dentry);
1281	if (err) {
1282		if (err != -ENOENT)
1283			mlog_errno(err);
1284		goto bail;
1285	}
1286
1287	generic_fillattr(inode, stat);
1288
1289	/* We set the blksize from the cluster size for performance */
1290	stat->blksize = osb->s_clustersize;
1291
1292bail:
1293	return err;
1294}
1295
1296int ocfs2_permission(struct inode *inode, int mask)
1297{
1298	int ret;
1299
1300	if (mask & MAY_NOT_BLOCK)
1301		return -ECHILD;
1302
1303	ret = ocfs2_inode_lock(inode, NULL, 0);
1304	if (ret) {
1305		if (ret != -ENOENT)
1306			mlog_errno(ret);
1307		goto out;
1308	}
1309
1310	ret = generic_permission(inode, mask);
1311
1312	ocfs2_inode_unlock(inode, 0);
1313out:
1314	return ret;
1315}
1316
1317static int __ocfs2_write_remove_suid(struct inode *inode,
1318				     struct buffer_head *bh)
1319{
1320	int ret;
1321	handle_t *handle;
1322	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1323	struct ocfs2_dinode *di;
1324
1325	trace_ocfs2_write_remove_suid(
1326			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1327			inode->i_mode);
1328
1329	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1330	if (IS_ERR(handle)) {
1331		ret = PTR_ERR(handle);
1332		mlog_errno(ret);
1333		goto out;
1334	}
1335
1336	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1337				      OCFS2_JOURNAL_ACCESS_WRITE);
1338	if (ret < 0) {
1339		mlog_errno(ret);
1340		goto out_trans;
1341	}
1342
1343	inode->i_mode &= ~S_ISUID;
1344	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1345		inode->i_mode &= ~S_ISGID;
1346
1347	di = (struct ocfs2_dinode *) bh->b_data;
1348	di->i_mode = cpu_to_le16(inode->i_mode);
1349
1350	ocfs2_journal_dirty(handle, bh);
1351
1352out_trans:
1353	ocfs2_commit_trans(osb, handle);
1354out:
1355	return ret;
1356}
1357
1358/*
1359 * Will look for holes and unwritten extents in the range starting at
1360 * pos for count bytes (inclusive).
1361 */
1362static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1363				       size_t count)
1364{
1365	int ret = 0;
1366	unsigned int extent_flags;
1367	u32 cpos, clusters, extent_len, phys_cpos;
1368	struct super_block *sb = inode->i_sb;
1369
1370	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1371	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1372
1373	while (clusters) {
1374		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1375					 &extent_flags);
1376		if (ret < 0) {
1377			mlog_errno(ret);
1378			goto out;
1379		}
1380
1381		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1382			ret = 1;
1383			break;
1384		}
1385
1386		if (extent_len > clusters)
1387			extent_len = clusters;
1388
1389		clusters -= extent_len;
1390		cpos += extent_len;
1391	}
1392out:
1393	return ret;
1394}
1395
1396static int ocfs2_write_remove_suid(struct inode *inode)
1397{
1398	int ret;
1399	struct buffer_head *bh = NULL;
1400
1401	ret = ocfs2_read_inode_block(inode, &bh);
1402	if (ret < 0) {
1403		mlog_errno(ret);
1404		goto out;
1405	}
1406
1407	ret =  __ocfs2_write_remove_suid(inode, bh);
1408out:
1409	brelse(bh);
1410	return ret;
1411}
1412
1413/*
1414 * Allocate enough extents to cover the region starting at byte offset
1415 * start for len bytes. Existing extents are skipped, any extents
1416 * added are marked as "unwritten".
1417 */
1418static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1419					    u64 start, u64 len)
1420{
1421	int ret;
1422	u32 cpos, phys_cpos, clusters, alloc_size;
1423	u64 end = start + len;
1424	struct buffer_head *di_bh = NULL;
1425
1426	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1427		ret = ocfs2_read_inode_block(inode, &di_bh);
1428		if (ret) {
1429			mlog_errno(ret);
1430			goto out;
1431		}
1432
1433		/*
1434		 * Nothing to do if the requested reservation range
1435		 * fits within the inode.
1436		 */
1437		if (ocfs2_size_fits_inline_data(di_bh, end))
1438			goto out;
1439
1440		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1441		if (ret) {
1442			mlog_errno(ret);
1443			goto out;
1444		}
1445	}
1446
1447	/*
1448	 * We consider both start and len to be inclusive.
1449	 */
1450	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1451	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1452	clusters -= cpos;
1453
1454	while (clusters) {
1455		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1456					 &alloc_size, NULL);
1457		if (ret) {
1458			mlog_errno(ret);
1459			goto out;
1460		}
1461
1462		/*
1463		 * Hole or existing extent len can be arbitrary, so
1464		 * cap it to our own allocation request.
1465		 */
1466		if (alloc_size > clusters)
1467			alloc_size = clusters;
1468
1469		if (phys_cpos) {
1470			/*
1471			 * We already have an allocation at this
1472			 * region so we can safely skip it.
1473			 */
1474			goto next;
1475		}
1476
1477		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1478		if (ret) {
1479			if (ret != -ENOSPC)
1480				mlog_errno(ret);
1481			goto out;
1482		}
1483
1484next:
1485		cpos += alloc_size;
1486		clusters -= alloc_size;
1487	}
1488
1489	ret = 0;
1490out:
1491
1492	brelse(di_bh);
1493	return ret;
1494}
1495
1496/*
1497 * Truncate a byte range, avoiding pages within partial clusters. This
1498 * preserves those pages for the zeroing code to write to.
1499 */
1500static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1501					 u64 byte_len)
1502{
1503	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1504	loff_t start, end;
1505	struct address_space *mapping = inode->i_mapping;
1506
1507	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1508	end = byte_start + byte_len;
1509	end = end & ~(osb->s_clustersize - 1);
1510
1511	if (start < end) {
1512		unmap_mapping_range(mapping, start, end - start, 0);
1513		truncate_inode_pages_range(mapping, start, end - 1);
1514	}
1515}
1516
1517static int ocfs2_zero_partial_clusters(struct inode *inode,
1518				       u64 start, u64 len)
1519{
1520	int ret = 0;
1521	u64 tmpend, end = start + len;
1522	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1523	unsigned int csize = osb->s_clustersize;
1524	handle_t *handle;
1525
1526	/*
1527	 * The "start" and "end" values are NOT necessarily part of
1528	 * the range whose allocation is being deleted. Rather, this
1529	 * is what the user passed in with the request. We must zero
1530	 * partial clusters here. There's no need to worry about
1531	 * physical allocation - the zeroing code knows to skip holes.
1532	 */
1533	trace_ocfs2_zero_partial_clusters(
1534		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1535		(unsigned long long)start, (unsigned long long)end);
1536
1537	/*
1538	 * If both edges are on a cluster boundary then there's no
1539	 * zeroing required as the region is part of the allocation to
1540	 * be truncated.
1541	 */
1542	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1543		goto out;
1544
1545	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1546	if (IS_ERR(handle)) {
1547		ret = PTR_ERR(handle);
1548		mlog_errno(ret);
1549		goto out;
1550	}
1551
1552	/*
1553	 * We want to get the byte offset of the end of the 1st cluster.
1554	 */
1555	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1556	if (tmpend > end)
1557		tmpend = end;
1558
1559	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1560						 (unsigned long long)tmpend);
1561
1562	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1563	if (ret)
1564		mlog_errno(ret);
1565
1566	if (tmpend < end) {
1567		/*
1568		 * This may make start and end equal, but the zeroing
1569		 * code will skip any work in that case so there's no
1570		 * need to catch it up here.
1571		 */
1572		start = end & ~(osb->s_clustersize - 1);
1573
1574		trace_ocfs2_zero_partial_clusters_range2(
1575			(unsigned long long)start, (unsigned long long)end);
1576
1577		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1578		if (ret)
1579			mlog_errno(ret);
1580	}
1581
1582	ocfs2_commit_trans(osb, handle);
1583out:
1584	return ret;
1585}
1586
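/*
 * Walk the extent list backwards and return the index of the last
 * record that starts below 'pos', or -1 if every record starts at or
 * beyond it (the caller then moves to the previous extent block).
 */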
1587static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1588{
1589	int i;
1590	struct ocfs2_extent_rec *rec = NULL;
1591
1592	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1593
1594		rec = &el->l_recs[i];
1595
1596		if (le32_to_cpu(rec->e_cpos) < pos)
1597			break;
1598	}
1599
1600	return i;
1601}
1602
1603/*
1604 * Helper to calculate the punching pos and length in one run, we handle the
1605 * following three cases in order:
1606 *
1607 * - remove the entire record
1608 * - remove a partial record
1609 * - no record needs to be removed (hole-punching completed)
1610*/
1611static void ocfs2_calc_trunc_pos(struct inode *inode,
1612				 struct ocfs2_extent_list *el,
1613				 struct ocfs2_extent_rec *rec,
1614				 u32 trunc_start, u32 *trunc_cpos,
1615				 u32 *trunc_len, u32 *trunc_end,
1616				 u64 *blkno, int *done)
1617{
1618	int ret = 0;
1619	u32 coff, range;
1620
1621	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1622
1623	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1624		/*
1625		 * remove an entire extent record.
1626		 */
1627		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1628		/*
1629		 * Skip holes if any.
1630		 */
1631		if (range < *trunc_end)
1632			*trunc_end = range;
1633		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1634		*blkno = le64_to_cpu(rec->e_blkno);
1635		*trunc_end = le32_to_cpu(rec->e_cpos);
1636	} else if (range > trunc_start) {
1637		/*
1638		 * remove a partial extent record, which means we're
1639		 * removing the last extent record.
1640		 */
1641		*trunc_cpos = trunc_start;
1642		/*
1643		 * skip hole if any.
1644		 */
1645		if (range < *trunc_end)
1646			*trunc_end = range;
1647		*trunc_len = *trunc_end - trunc_start;
1648		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1649		*blkno = le64_to_cpu(rec->e_blkno) +
1650				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1651		*trunc_end = trunc_start;
1652	} else {
1653		/*
 1654		 * There are two possibilities here:
 1655		 *
 1656		 * - the last record has been removed
 1657		 * - trunc_start was within a hole
 1658		 *
 1659		 * Either case means hole punching is complete.
1660		 */
1661		ret = 1;
1662	}
1663
1664	*done = ret;
1665}
1666
1667static int ocfs2_remove_inode_range(struct inode *inode,
1668				    struct buffer_head *di_bh, u64 byte_start,
1669				    u64 byte_len)
1670{
1671	int ret = 0, flags = 0, done = 0, i;
1672	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1673	u32 cluster_in_el;
1674	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1675	struct ocfs2_cached_dealloc_ctxt dealloc;
1676	struct address_space *mapping = inode->i_mapping;
1677	struct ocfs2_extent_tree et;
1678	struct ocfs2_path *path = NULL;
1679	struct ocfs2_extent_list *el = NULL;
1680	struct ocfs2_extent_rec *rec = NULL;
1681	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1682	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1683
1684	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1685	ocfs2_init_dealloc_ctxt(&dealloc);
1686
1687	trace_ocfs2_remove_inode_range(
1688			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1689			(unsigned long long)byte_start,
1690			(unsigned long long)byte_len);
1691
1692	if (byte_len == 0)
1693		return 0;
1694
1695	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1696		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1697					    byte_start + byte_len, 0);
1698		if (ret) {
1699			mlog_errno(ret);
1700			goto out;
1701		}
1702		/*
1703		 * There's no need to get fancy with the page cache
1704		 * truncate of an inline-data inode. We're talking
1705		 * about less than a page here, which will be cached
1706		 * in the dinode buffer anyway.
1707		 */
1708		unmap_mapping_range(mapping, 0, 0, 0);
1709		truncate_inode_pages(mapping, 0);
1710		goto out;
1711	}
1712
1713	/*
 1714	 * For reflinked files, we may need to CoW two clusters which might be
 1715	 * partially zeroed later, if the hole's start and end offsets fall
 1716	 * within one cluster (i.e. are not exactly aligned to the cluster size).
1717	 */
1718
1719	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
1720
1721		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1722		if (ret) {
1723			mlog_errno(ret);
1724			goto out;
1725		}
1726
1727		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1728		if (ret) {
1729			mlog_errno(ret);
1730			goto out;
1731		}
1732	}
1733
1734	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1735	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1736	cluster_in_el = trunc_end;
1737
1738	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1739	if (ret) {
1740		mlog_errno(ret);
1741		goto out;
1742	}
1743
1744	path = ocfs2_new_path_from_et(&et);
1745	if (!path) {
1746		ret = -ENOMEM;
1747		mlog_errno(ret);
1748		goto out;
1749	}
1750
1751	while (trunc_end > trunc_start) {
1752
1753		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1754				      cluster_in_el);
1755		if (ret) {
1756			mlog_errno(ret);
1757			goto out;
1758		}
1759
1760		el = path_leaf_el(path);
1761
1762		i = ocfs2_find_rec(el, trunc_end);
1763		/*
1764		 * Need to go to previous extent block.
1765		 */
1766		if (i < 0) {
1767			if (path->p_tree_depth == 0)
1768				break;
1769
1770			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1771							    path,
1772							    &cluster_in_el);
1773			if (ret) {
1774				mlog_errno(ret);
1775				goto out;
1776			}
1777
1778			/*
1779			 * We've reached the leftmost extent block,
1780			 * it's safe to leave.
1781			 */
1782			if (cluster_in_el == 0)
1783				break;
1784
1785			/*
1786			 * The 'pos' searched for previous extent block is
1787			 * always one cluster less than actual trunc_end.
1788			 */
1789			trunc_end = cluster_in_el + 1;
1790
1791			ocfs2_reinit_path(path, 1);
1792
1793			continue;
1794
1795		} else
1796			rec = &el->l_recs[i];
1797
1798		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1799				     &trunc_len, &trunc_end, &blkno, &done);
1800		if (done)
1801			break;
1802
1803		flags = rec->e_flags;
1804		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1805
1806		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1807					       phys_cpos, trunc_len, flags,
1808					       &dealloc, refcount_loc);
1809		if (ret < 0) {
1810			mlog_errno(ret);
1811			goto out;
1812		}
1813
1814		cluster_in_el = trunc_end;
1815
1816		ocfs2_reinit_path(path, 1);
1817	}
1818
1819	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1820
1821out:
1822	ocfs2_schedule_truncate_log_flush(osb, 1);
1823	ocfs2_run_deallocs(osb, &dealloc);
1824
1825	return ret;
1826}
1827
1828/*
1829 * Parts of this function taken from xfs_change_file_space()
1830 */
1831static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1832				     loff_t f_pos, unsigned int cmd,
1833				     struct ocfs2_space_resv *sr,
1834				     int change_size)
1835{
1836	int ret;
1837	s64 llen;
1838	loff_t size;
1839	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1840	struct buffer_head *di_bh = NULL;
1841	handle_t *handle;
1842	unsigned long long max_off = inode->i_sb->s_maxbytes;
1843
1844	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1845		return -EROFS;
1846
1847	mutex_lock(&inode->i_mutex);
1848
1849	/*
1850	 * This prevents concurrent writes on other nodes
1851	 */
1852	ret = ocfs2_rw_lock(inode, 1);
1853	if (ret) {
1854		mlog_errno(ret);
1855		goto out;
1856	}
1857
1858	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1859	if (ret) {
1860		mlog_errno(ret);
1861		goto out_rw_unlock;
1862	}
1863
1864	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1865		ret = -EPERM;
1866		goto out_inode_unlock;
1867	}
1868
1869	switch (sr->l_whence) {
1870	case 0: /*SEEK_SET*/
1871		break;
1872	case 1: /*SEEK_CUR*/
1873		sr->l_start += f_pos;
1874		break;
1875	case 2: /*SEEK_END*/
1876		sr->l_start += i_size_read(inode);
1877		break;
1878	default:
1879		ret = -EINVAL;
1880		goto out_inode_unlock;
1881	}
1882	sr->l_whence = 0;
1883
1884	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1885
1886	if (sr->l_start < 0
1887	    || sr->l_start > max_off
1888	    || (sr->l_start + llen) < 0
1889	    || (sr->l_start + llen) > max_off) {
1890		ret = -EINVAL;
1891		goto out_inode_unlock;
1892	}
1893	size = sr->l_start + sr->l_len;
1894
1895	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1896		if (sr->l_len <= 0) {
1897			ret = -EINVAL;
1898			goto out_inode_unlock;
1899		}
1900	}
1901
1902	if (file && should_remove_suid(file->f_path.dentry)) {
1903		ret = __ocfs2_write_remove_suid(inode, di_bh);
1904		if (ret) {
1905			mlog_errno(ret);
1906			goto out_inode_unlock;
1907		}
1908	}
1909
1910	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1911	switch (cmd) {
1912	case OCFS2_IOC_RESVSP:
1913	case OCFS2_IOC_RESVSP64:
1914		/*
1915		 * This takes unsigned offsets, but the signed ones we
1916		 * pass have been checked against overflow above.
1917		 */
1918		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1919						       sr->l_len);
1920		break;
1921	case OCFS2_IOC_UNRESVSP:
1922	case OCFS2_IOC_UNRESVSP64:
1923		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1924					       sr->l_len);
1925		break;
1926	default:
1927		ret = -EINVAL;
1928	}
1929	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1930	if (ret) {
1931		mlog_errno(ret);
1932		goto out_inode_unlock;
1933	}
1934
1935	/*
1936	 * We update c/mtime for these changes
1937	 */
1938	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1939	if (IS_ERR(handle)) {
1940		ret = PTR_ERR(handle);
1941		mlog_errno(ret);
1942		goto out_inode_unlock;
1943	}
1944
1945	if (change_size && i_size_read(inode) < size)
1946		i_size_write(inode, size);
1947
1948	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1949	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1950	if (ret < 0)
1951		mlog_errno(ret);
1952
1953	ocfs2_commit_trans(osb, handle);
1954
1955out_inode_unlock:
1956	brelse(di_bh);
1957	ocfs2_inode_unlock(inode, 1);
1958out_rw_unlock:
1959	ocfs2_rw_unlock(inode, 1);
1960
1961out:
1962	mutex_unlock(&inode->i_mutex);
1963	return ret;
1964}
1965
1966int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1967			    struct ocfs2_space_resv *sr)
1968{
1969	struct inode *inode = file->f_path.dentry->d_inode;
1970	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1971
1972	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1973	    !ocfs2_writes_unwritten_extents(osb))
1974		return -ENOTTY;
1975	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1976		 !ocfs2_sparse_alloc(osb))
1977		return -ENOTTY;
1978
1979	if (!S_ISREG(inode->i_mode))
1980		return -EINVAL;
1981
1982	if (!(file->f_mode & FMODE_WRITE))
1983		return -EBADF;
1984
1985	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1986}
1987
1988static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
1989			    loff_t len)
1990{
1991	struct inode *inode = file->f_path.dentry->d_inode;
1992	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1993	struct ocfs2_space_resv sr;
1994	int change_size = 1;
1995	int cmd = OCFS2_IOC_RESVSP64;
1996
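	/*
	 * fallocate() is built on the RESVSP64/UNRESVSP64 helpers: plain
	 * preallocation maps to RESVSP64, FALLOC_FL_PUNCH_HOLE maps to
	 * UNRESVSP64, and FALLOC_FL_KEEP_SIZE suppresses the i_size update.
	 */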
1997	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1998		return -EOPNOTSUPP;
1999	if (!ocfs2_writes_unwritten_extents(osb))
2000		return -EOPNOTSUPP;
2001
2002	if (mode & FALLOC_FL_KEEP_SIZE)
2003		change_size = 0;
2004
2005	if (mode & FALLOC_FL_PUNCH_HOLE)
2006		cmd = OCFS2_IOC_UNRESVSP64;
2007
2008	sr.l_whence = 0;
2009	sr.l_start = (s64)offset;
2010	sr.l_len = (s64)len;
2011
2012	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2013					 change_size);
2014}
2015
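/*
 * Returns 1 if any extent in the byte range [pos, pos + count) is marked
 * refcounted (shared via reflink), 0 if none is, or a negative error code.
 */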
2016int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2017				   size_t count)
2018{
2019	int ret = 0;
2020	unsigned int extent_flags;
2021	u32 cpos, clusters, extent_len, phys_cpos;
2022	struct super_block *sb = inode->i_sb;
2023
2024	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2025	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
2026	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2027		return 0;
2028
2029	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2030	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2031
2032	while (clusters) {
2033		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2034					 &extent_flags);
2035		if (ret < 0) {
2036			mlog_errno(ret);
2037			goto out;
2038		}
2039
2040		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2041			ret = 1;
2042			break;
2043		}
2044
2045		if (extent_len > clusters)
2046			extent_len = clusters;
2047
2048		clusters -= extent_len;
2049		cpos += extent_len;
2050	}
2051out:
2052	return ret;
2053}
2054
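/*
 * Take an exclusive inode lock and CoW the refcounted clusters covering
 * the write range; *meta_level is set to 1 so the caller knows an
 * exclusive lock is now held.
 */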
2055static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2056					    struct file *file,
2057					    loff_t pos, size_t count,
2058					    int *meta_level)
2059{
2060	int ret;
2061	struct buffer_head *di_bh = NULL;
2062	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2063	u32 clusters =
2064		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2065
2066	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2067	if (ret) {
2068		mlog_errno(ret);
2069		goto out;
2070	}
2071
2072	*meta_level = 1;
2073
2074	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
2075	if (ret)
2076		mlog_errno(ret);
2077out:
2078	brelse(di_bh);
2079	return ret;
2080}
2081
2082static int ocfs2_prepare_inode_for_write(struct file *file,
2083					 loff_t *ppos,
2084					 size_t count,
2085					 int appending,
2086					 int *direct_io,
2087					 int *has_refcount)
2088{
2089	int ret = 0, meta_level = 0;
2090	struct dentry *dentry = file->f_path.dentry;
2091	struct inode *inode = dentry->d_inode;
2092	loff_t saved_pos = 0, end;
2093
2094	/*
2095	 * We start with a read level meta lock and only jump to an ex
2096	 * if we need to make modifications here.
2097	 */
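	/*
	 * meta_level mirrors the cluster lock state: 0 means a shared
	 * (read) lock is held, 1 an exclusive lock, and -1 that no inode
	 * lock is held, so the unlock at out_unlock is skipped.
	 */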
2098	for(;;) {
2099		ret = ocfs2_inode_lock(inode, NULL, meta_level);
2100		if (ret < 0) {
2101			meta_level = -1;
2102			mlog_errno(ret);
2103			goto out;
2104		}
2105
2106		/* Clear suid / sgid if necessary. We do this here
2107		 * instead of later in the write path because
2108		 * remove_suid() calls ->setattr without any hint that
2109		 * we may have already done our cluster locking. Since
2110		 * ocfs2_setattr() *must* take cluster locks to
2111		 * proceed, this will lead us to recursively lock the
2112		 * inode. There's also the dinode i_size state which
2113		 * can be lost via setattr during extending writes (we
2114		 * set inode->i_size at the end of a write). */
2115		if (should_remove_suid(dentry)) {
2116			if (meta_level == 0) {
2117				ocfs2_inode_unlock(inode, meta_level);
2118				meta_level = 1;
2119				continue;
2120			}
2121
2122			ret = ocfs2_write_remove_suid(inode);
2123			if (ret < 0) {
2124				mlog_errno(ret);
2125				goto out_unlock;
2126			}
2127		}
2128
2129		/* work on a copy of ppos until we're sure that we won't have
2130		 * to recalculate it due to relocking. */
2131		if (appending)
2132			saved_pos = i_size_read(inode);
2133		else
2134			saved_pos = *ppos;
2135
2136		end = saved_pos + count;
2137
2138		ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
2139		if (ret == 1) {
2140			ocfs2_inode_unlock(inode, meta_level);
2141			meta_level = -1;
2142
2143			ret = ocfs2_prepare_inode_for_refcount(inode,
2144							       file,
2145							       saved_pos,
2146							       count,
2147							       &meta_level);
2148			if (has_refcount)
2149				*has_refcount = 1;
2150			if (direct_io)
2151				*direct_io = 0;
2152		}
2153
2154		if (ret < 0) {
2155			mlog_errno(ret);
2156			goto out_unlock;
2157		}
2158
2159		/*
2160		 * Skip the O_DIRECT checks if we don't need
2161		 * them.
2162		 */
2163		if (!direct_io || !(*direct_io))
2164			break;
2165
2166		/*
2167		 * There's no sane way to do direct writes to an inode
2168		 * with inline data.
2169		 */
2170		if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2171			*direct_io = 0;
2172			break;
2173		}
2174
2175		/*
2176		 * Allowing concurrent direct writes means
2177		 * i_size changes wouldn't be synchronized, so
2178		 * one node could wind up truncating another
2179		 * node's writes.
2180		 */
2181		if (end > i_size_read(inode)) {
2182			*direct_io = 0;
2183			break;
2184		}
2185
2186		/*
2187		 * We don't fill holes during direct io, so
2188		 * check for them here. If any are found, the
2189		 * caller will have to retake some cluster
2190		 * locks and initiate the io as buffered.
2191		 */
2192		ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2193		if (ret == 1) {
2194			*direct_io = 0;
2195			ret = 0;
2196		} else if (ret < 0)
2197			mlog_errno(ret);
2198		break;
2199	}
2200
2201	if (appending)
2202		*ppos = saved_pos;
2203
2204out_unlock:
2205	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2206					    saved_pos, appending, count,
2207					    direct_io, has_refcount);
2208
2209	if (meta_level >= 0)
2210		ocfs2_inode_unlock(inode, meta_level);
2211
2212out:
2213	return ret;
2214}
2215
2216static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2217				    const struct iovec *iov,
2218				    unsigned long nr_segs,
2219				    loff_t pos)
2220{
2221	int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
2222	int can_do_direct, has_refcount = 0;
2223	ssize_t written = 0;
2224	size_t ocount;		/* original count */
2225	size_t count;		/* after file limit checks */
2226	loff_t old_size, *ppos = &iocb->ki_pos;
2227	u32 old_clusters;
2228	struct file *file = iocb->ki_filp;
2229	struct inode *inode = file->f_path.dentry->d_inode;
2230	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2231	int full_coherency = !(osb->s_mount_opt &
2232			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2233
2234	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2235		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2236		file->f_path.dentry->d_name.len,
2237		file->f_path.dentry->d_name.name,
2238		(unsigned int)nr_segs);
2239
2240	if (iocb->ki_left == 0)
2241		return 0;
2242
2243	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2244
2245	appending = file->f_flags & O_APPEND ? 1 : 0;
2246	direct_io = file->f_flags & O_DIRECT ? 1 : 0;
2247
2248	mutex_lock(&inode->i_mutex);
2249
2250	ocfs2_iocb_clear_sem_locked(iocb);
2251
2252relock:
2253	/* to match setattr's i_mutex -> rw_lock ordering */
2254	if (direct_io) {
2255		have_alloc_sem = 1;
2256		/* communicate with ocfs2_dio_end_io */
2257		ocfs2_iocb_set_sem_locked(iocb);
2258	}
2259
2260	/*
2261	 * Concurrent O_DIRECT writes are allowed with
2262	 * mount_option "coherency=buffered".
2263	 */
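	/* rw_level 1 takes the rw lock exclusive; 0 takes it shared. */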
2264	rw_level = (!direct_io || full_coherency);
2265
2266	ret = ocfs2_rw_lock(inode, rw_level);
2267	if (ret < 0) {
2268		mlog_errno(ret);
2269		goto out_sems;
2270	}
2271
2272	/*
2273	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2274	 * inode_lock to guarantee coherency.
2275	 */
2276	if (direct_io && full_coherency) {
2277		/*
2278		 * We need to take and drop the inode lock to force
2279		 * other nodes to drop their caches.  Buffered I/O
2280		 * already does this in write_begin().
2281		 */
2282		ret = ocfs2_inode_lock(inode, NULL, 1);
2283		if (ret < 0) {
2284			mlog_errno(ret);
2285			goto out_sems;
2286		}
2287
2288		ocfs2_inode_unlock(inode, 1);
2289	}
2290
2291	can_do_direct = direct_io;
2292	ret = ocfs2_prepare_inode_for_write(file, ppos,
2293					    iocb->ki_left, appending,
2294					    &can_do_direct, &has_refcount);
2295	if (ret < 0) {
2296		mlog_errno(ret);
2297		goto out;
2298	}
2299
2300	/*
2301	 * We can't complete the direct I/O as requested, fall back to
2302	 * buffered I/O.
2303	 */
2304	if (direct_io && !can_do_direct) {
2305		ocfs2_rw_unlock(inode, rw_level);
2306
2307		have_alloc_sem = 0;
2308		rw_level = -1;
2309
2310		direct_io = 0;
2311		goto relock;
2312	}
2313
2314	/*
2315	 * To later detect whether a journal commit for sync writes is
2316	 * necessary, we sample i_size, and cluster count here.
2317	 */
2318	old_size = i_size_read(inode);
2319	old_clusters = OCFS2_I(inode)->ip_clusters;
2320
2321	/* communicate with ocfs2_dio_end_io */
2322	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2323
2324	ret = generic_segment_checks(iov, &nr_segs, &ocount,
2325				     VERIFY_READ);
2326	if (ret)
2327		goto out_dio;
2328
2329	count = ocount;
2330	ret = generic_write_checks(file, ppos, &count,
2331				   S_ISBLK(inode->i_mode));
2332	if (ret)
2333		goto out_dio;
2334
2335	if (direct_io) {
2336		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2337						    ppos, count, ocount);
2338		if (written < 0) {
2339			ret = written;
2340			goto out_dio;
2341		}
2342	} else {
2343		current->backing_dev_info = file->f_mapping->backing_dev_info;
2344		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
2345						      ppos, count, 0);
2346		current->backing_dev_info = NULL;
2347	}
2348
2349out_dio:
2350	/* buffered aio wouldn't have proper lock coverage today */
2351	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2352
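	/*
	 * For O_DSYNC/O_SYNC writes that went through the page cache (or an
	 * O_DIRECT request that fell back to buffered I/O), flush the
	 * written range; if the inode metadata changed as well (size,
	 * cluster count, or a refcounted extent was CoWed), force a journal
	 * commit before waiting on the data.
	 */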
2353	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2354	    ((file->f_flags & O_DIRECT) && !direct_io)) {
2355		ret = filemap_fdatawrite_range(file->f_mapping, pos,
2356					       pos + count - 1);
2357		if (ret < 0)
2358			written = ret;
2359
2360		if (!ret && ((old_size != i_size_read(inode)) ||
2361			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
2362			     has_refcount)) {
2363			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2364			if (ret < 0)
2365				written = ret;
2366		}
2367
2368		if (!ret)
2369			ret = filemap_fdatawait_range(file->f_mapping, pos,
2370						      pos + count - 1);
2371	}
2372
2373	/*
2374	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2375	 * function pointer which is called when o_direct io completes so that
2376	 * it can unlock our rw lock.
2377	 * Unfortunately there are error cases which call end_io and others
2378	 * that don't, so we don't have to unlock the rw_lock if either an
2379	 * async dio is going to do it in the future or an end_io after an
2380	 * error has already done it.
2381	 */
2382	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2383		rw_level = -1;
2384		have_alloc_sem = 0;
2385	}
2386
2387out:
2388	if (rw_level != -1)
2389		ocfs2_rw_unlock(inode, rw_level);
2390
2391out_sems:
2392	if (have_alloc_sem)
2393		ocfs2_iocb_clear_sem_locked(iocb);
2394
2395	mutex_unlock(&inode->i_mutex);
2396
2397	if (written)
2398		ret = written;
2399	return ret;
2400}
2401
2402static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2403				struct file *out,
2404				struct splice_desc *sd)
2405{
2406	int ret;
2407
2408	ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
2409					    sd->total_len, 0, NULL, NULL);
2410	if (ret < 0) {
2411		mlog_errno(ret);
2412		return ret;
2413	}
2414
2415	return splice_from_pipe_feed(pipe, sd, pipe_to_file);
2416}
2417
2418static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2419				       struct file *out,
2420				       loff_t *ppos,
2421				       size_t len,
2422				       unsigned int flags)
2423{
2424	int ret;
2425	struct address_space *mapping = out->f_mapping;
2426	struct inode *inode = mapping->host;
2427	struct splice_desc sd = {
2428		.total_len = len,
2429		.flags = flags,
2430		.pos = *ppos,
2431		.u.file = out,
2432	};
2433
2434
2435	trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
2436			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2437			out->f_path.dentry->d_name.len,
2438			out->f_path.dentry->d_name.name, len);
2439
2440	if (pipe->inode)
2441		mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
2442
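	/*
	 * Pipe data is consumed in chunks; each pass through the loop takes
	 * the inode mutex (nested under the pipe mutex taken above) and an
	 * exclusive cluster rw lock around the actual copy into the file.
	 */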
2443	splice_from_pipe_begin(&sd);
2444	do {
2445		ret = splice_from_pipe_next(pipe, &sd);
2446		if (ret <= 0)
2447			break;
2448
2449		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2450		ret = ocfs2_rw_lock(inode, 1);
2451		if (ret < 0)
2452			mlog_errno(ret);
2453		else {
2454			ret = ocfs2_splice_to_file(pipe, out, &sd);
2455			ocfs2_rw_unlock(inode, 1);
2456		}
2457		mutex_unlock(&inode->i_mutex);
2458	} while (ret > 0);
2459	splice_from_pipe_end(pipe, &sd);
2460
2461	if (pipe->inode)
2462		mutex_unlock(&pipe->inode->i_mutex);
2463
2464	if (sd.num_spliced)
2465		ret = sd.num_spliced;
2466
2467	if (ret > 0) {
2468		unsigned long nr_pages;
2469		int err;
2470
2471		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2472
2473		err = generic_write_sync(out, *ppos, ret);
2474		if (err)
2475			ret = err;
2476		else
2477			*ppos += ret;
2478
2479		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2480	}
2481
2482	return ret;
2483}
2484
2485static ssize_t ocfs2_file_splice_read(struct file *in,
2486				      loff_t *ppos,
2487				      struct pipe_inode_info *pipe,
2488				      size_t len,
2489				      unsigned int flags)
2490{
2491	int ret = 0, lock_level = 0;
2492	struct inode *inode = in->f_path.dentry->d_inode;
2493
2494	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2495			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2496			in->f_path.dentry->d_name.len,
2497			in->f_path.dentry->d_name.name, len);
2498
2499	/*
2500	 * See the comment in ocfs2_file_aio_read()
2501	 */
2502	ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
2503	if (ret < 0) {
2504		mlog_errno(ret);
2505		goto bail;
2506	}
2507	ocfs2_inode_unlock(inode, lock_level);
2508
2509	ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2510
2511bail:
2512	return ret;
2513}
2514
2515static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2516				   const struct iovec *iov,
2517				   unsigned long nr_segs,
2518				   loff_t pos)
2519{
2520	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2521	struct file *filp = iocb->ki_filp;
2522	struct inode *inode = filp->f_path.dentry->d_inode;
2523
2524	trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2525			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2526			filp->f_path.dentry->d_name.len,
2527			filp->f_path.dentry->d_name.name, nr_segs);
2528
2529
2530	if (!inode) {
2531		ret = -EINVAL;
2532		mlog_errno(ret);
2533		goto bail;
2534	}
2535
2536	ocfs2_iocb_clear_sem_locked(iocb);
2537
2538	/*
2539	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
2540	 * need locks to protect pending reads from racing with truncate.
2541	 */
2542	if (filp->f_flags & O_DIRECT) {
2543		have_alloc_sem = 1;
2544		ocfs2_iocb_set_sem_locked(iocb);
2545
2546		ret = ocfs2_rw_lock(inode, 0);
2547		if (ret < 0) {
2548			mlog_errno(ret);
2549			goto bail;
2550		}
2551		rw_level = 0;
2552		/* communicate with ocfs2_dio_end_io */
2553		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2554	}
2555
2556	/*
2557	 * We're fine letting folks race truncates and extending
2558	 * writes with read across the cluster, just like they can
2559	 * locally. Hence no rw_lock during read.
2560	 *
2561	 * Take and drop the meta data lock to update inode fields
2562	 * like i_size. This allows the checks down below
2563	 * generic_file_aio_read() a chance of actually working.
2564	 */
2565	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2566	if (ret < 0) {
2567		mlog_errno(ret);
2568		goto bail;
2569	}
2570	ocfs2_inode_unlock(inode, lock_level);
2571
2572	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2573	trace_generic_file_aio_read_ret(ret);
2574
2575	/* buffered aio wouldn't have proper lock coverage today */
2576	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2577
2578	/* see ocfs2_file_aio_write */
2579	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2580		rw_level = -1;
2581		have_alloc_sem = 0;
2582	}
2583
2584bail:
2585	if (have_alloc_sem)
2586		ocfs2_iocb_clear_sem_locked(iocb);
2587
2588	if (rw_level != -1)
2589		ocfs2_rw_unlock(inode, rw_level);
2590
2591	return ret;
2592}
2593
2594const struct inode_operations ocfs2_file_iops = {
2595	.setattr	= ocfs2_setattr,
2596	.getattr	= ocfs2_getattr,
2597	.permission	= ocfs2_permission,
2598	.setxattr	= generic_setxattr,
2599	.getxattr	= generic_getxattr,
2600	.listxattr	= ocfs2_listxattr,
2601	.removexattr	= generic_removexattr,
2602	.fiemap		= ocfs2_fiemap,
2603	.get_acl	= ocfs2_iop_get_acl,
2604};
2605
2606const struct inode_operations ocfs2_special_file_iops = {
2607	.setattr	= ocfs2_setattr,
2608	.getattr	= ocfs2_getattr,
2609	.permission	= ocfs2_permission,
2610	.get_acl	= ocfs2_iop_get_acl,
2611};
2612
2613/*
2614 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2615 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2616 */
2617const struct file_operations ocfs2_fops = {
2618	.llseek		= generic_file_llseek,
2619	.read		= do_sync_read,
2620	.write		= do_sync_write,
2621	.mmap		= ocfs2_mmap,
2622	.fsync		= ocfs2_sync_file,
2623	.release	= ocfs2_file_release,
2624	.open		= ocfs2_file_open,
2625	.aio_read	= ocfs2_file_aio_read,
2626	.aio_write	= ocfs2_file_aio_write,
2627	.unlocked_ioctl	= ocfs2_ioctl,
2628#ifdef CONFIG_COMPAT
2629	.compat_ioctl   = ocfs2_compat_ioctl,
2630#endif
2631	.lock		= ocfs2_lock,
2632	.flock		= ocfs2_flock,
2633	.splice_read	= ocfs2_file_splice_read,
2634	.splice_write	= ocfs2_file_splice_write,
2635	.fallocate	= ocfs2_fallocate,
2636};
2637
2638const struct file_operations ocfs2_dops = {
2639	.llseek		= generic_file_llseek,
2640	.read		= generic_read_dir,
2641	.readdir	= ocfs2_readdir,
2642	.fsync		= ocfs2_sync_file,
2643	.release	= ocfs2_dir_release,
2644	.open		= ocfs2_dir_open,
2645	.unlocked_ioctl	= ocfs2_ioctl,
2646#ifdef CONFIG_COMPAT
2647	.compat_ioctl   = ocfs2_compat_ioctl,
2648#endif
2649	.lock		= ocfs2_lock,
2650	.flock		= ocfs2_flock,
2651};
2652
2653/*
2654 * POSIX-lockless variants of our file_operations.
2655 *
2656 * These will be used if the underlying cluster stack does not support
2657 * posix file locking, if the user passes the "localflocks" mount
2658 * option, or if we have a local-only fs.
2659 *
2660 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2661 * so we still want it in the case of no stack support for
2662 * plocks. Internally, it will do the right thing when asked to ignore
2663 * the cluster.
2664 */
2665const struct file_operations ocfs2_fops_no_plocks = {
2666	.llseek		= generic_file_llseek,
2667	.read		= do_sync_read,
2668	.write		= do_sync_write,
2669	.mmap		= ocfs2_mmap,
2670	.fsync		= ocfs2_sync_file,
2671	.release	= ocfs2_file_release,
2672	.open		= ocfs2_file_open,
2673	.aio_read	= ocfs2_file_aio_read,
2674	.aio_write	= ocfs2_file_aio_write,
2675	.unlocked_ioctl	= ocfs2_ioctl,
2676#ifdef CONFIG_COMPAT
2677	.compat_ioctl   = ocfs2_compat_ioctl,
2678#endif
2679	.flock		= ocfs2_flock,
2680	.splice_read	= ocfs2_file_splice_read,
2681	.splice_write	= ocfs2_file_splice_write,
2682	.fallocate	= ocfs2_fallocate,
2683};
2684
2685const struct file_operations ocfs2_dops_no_plocks = {
2686	.llseek		= generic_file_llseek,
2687	.read		= generic_read_dir,
2688	.readdir	= ocfs2_readdir,
2689	.fsync		= ocfs2_sync_file,
2690	.release	= ocfs2_dir_release,
2691	.open		= ocfs2_dir_open,
2692	.unlocked_ioctl	= ocfs2_ioctl,
2693#ifdef CONFIG_COMPAT
2694	.compat_ioctl   = ocfs2_compat_ioctl,
2695#endif
2696	.flock		= ocfs2_flock,
2697};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * file.c
   4 *
   5 * File open, close, extend, truncate
   6 *
   7 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/fs.h>
  12#include <linux/types.h>
  13#include <linux/slab.h>
  14#include <linux/highmem.h>
  15#include <linux/pagemap.h>
  16#include <linux/uio.h>
  17#include <linux/sched.h>
  18#include <linux/splice.h>
  19#include <linux/mount.h>
  20#include <linux/writeback.h>
  21#include <linux/falloc.h>
  22#include <linux/quotaops.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25
  26#include <cluster/masklog.h>
  27
  28#include "ocfs2.h"
  29
  30#include "alloc.h"
  31#include "aops.h"
  32#include "dir.h"
  33#include "dlmglue.h"
  34#include "extent_map.h"
  35#include "file.h"
  36#include "sysfile.h"
  37#include "inode.h"
  38#include "ioctl.h"
  39#include "journal.h"
  40#include "locks.h"
  41#include "mmap.h"
  42#include "suballoc.h"
  43#include "super.h"
  44#include "xattr.h"
  45#include "acl.h"
  46#include "quota.h"
  47#include "refcounttree.h"
  48#include "ocfs2_trace.h"
  49
  50#include "buffer_head_io.h"
  51
  52static int ocfs2_init_file_private(struct inode *inode, struct file *file)
  53{
  54	struct ocfs2_file_private *fp;
  55
  56	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
  57	if (!fp)
  58		return -ENOMEM;
  59
  60	fp->fp_file = file;
  61	mutex_init(&fp->fp_mutex);
  62	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
  63	file->private_data = fp;
  64
  65	return 0;
  66}
  67
  68static void ocfs2_free_file_private(struct inode *inode, struct file *file)
  69{
  70	struct ocfs2_file_private *fp = file->private_data;
  71	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  72
  73	if (fp) {
  74		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
  75		ocfs2_lock_res_free(&fp->fp_flock);
  76		kfree(fp);
  77		file->private_data = NULL;
  78	}
  79}
  80
  81static int ocfs2_file_open(struct inode *inode, struct file *file)
  82{
  83	int status;
  84	int mode = file->f_flags;
  85	struct ocfs2_inode_info *oi = OCFS2_I(inode);
  86
  87	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
  88			      (unsigned long long)oi->ip_blkno,
  89			      file->f_path.dentry->d_name.len,
  90			      file->f_path.dentry->d_name.name, mode);
  91
  92	if (file->f_mode & FMODE_WRITE) {
  93		status = dquot_initialize(inode);
  94		if (status)
  95			goto leave;
  96	}
  97
  98	spin_lock(&oi->ip_lock);
  99
 100	/* Check that the inode hasn't been wiped from disk by another
 101	 * node. If it hasn't then we're safe as long as we hold the
 102	 * spin lock until our increment of open count. */
 103	if (oi->ip_flags & OCFS2_INODE_DELETED) {
 104		spin_unlock(&oi->ip_lock);
 105
 106		status = -ENOENT;
 107		goto leave;
 108	}
 109
 110	if (mode & O_DIRECT)
 111		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
 112
 113	oi->ip_open_count++;
 114	spin_unlock(&oi->ip_lock);
 115
 116	status = ocfs2_init_file_private(inode, file);
 117	if (status) {
 118		/*
 119		 * We want to set open count back if we're failing the
 120		 * open.
 121		 */
 122		spin_lock(&oi->ip_lock);
 123		oi->ip_open_count--;
 124		spin_unlock(&oi->ip_lock);
 125	}
 126
 127	file->f_mode |= FMODE_NOWAIT;
 128
 129leave:
 130	return status;
 131}
 132
 133static int ocfs2_file_release(struct inode *inode, struct file *file)
 134{
 135	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 136
 137	spin_lock(&oi->ip_lock);
 138	if (!--oi->ip_open_count)
 139		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
 140
 141	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
 142				 oi->ip_blkno,
 143				 file->f_path.dentry->d_name.len,
 144				 file->f_path.dentry->d_name.name,
 145				 oi->ip_open_count);
 146	spin_unlock(&oi->ip_lock);
 147
 148	ocfs2_free_file_private(inode, file);
 149
 150	return 0;
 151}
 152
 153static int ocfs2_dir_open(struct inode *inode, struct file *file)
 154{
 155	return ocfs2_init_file_private(inode, file);
 156}
 157
 158static int ocfs2_dir_release(struct inode *inode, struct file *file)
 159{
 160	ocfs2_free_file_private(inode, file);
 161	return 0;
 162}
 163
 164static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 165			   int datasync)
 166{
 167	int err = 0;
 168	struct inode *inode = file->f_mapping->host;
 169	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 170	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 171	journal_t *journal = osb->journal->j_journal;
 172	int ret;
 173	tid_t commit_tid;
 174	bool needs_barrier = false;
 175
 176	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
 177			      oi->ip_blkno,
 178			      file->f_path.dentry->d_name.len,
 179			      file->f_path.dentry->d_name.name,
 180			      (unsigned long long)datasync);
 181
 182	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 183		return -EROFS;
 184
 185	err = file_write_and_wait_range(file, start, end);
 186	if (err)
 187		return err;
 188
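	/*
	 * Find the transaction that last touched this inode (the data-only
	 * one for datasync) and make sure it is committed; if that commit
	 * will not itself issue a barrier, flush the device explicitly.
	 */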
 189	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
 190	if (journal->j_flags & JBD2_BARRIER &&
 191	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 192		needs_barrier = true;
 193	err = jbd2_complete_transaction(journal, commit_tid);
 194	if (needs_barrier) {
 195		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
 196		if (!err)
 197			err = ret;
 198	}
 199
 200	if (err)
 201		mlog_errno(err);
 202
 203	return (err < 0) ? -EIO : 0;
 204}
 205
 206int ocfs2_should_update_atime(struct inode *inode,
 207			      struct vfsmount *vfsmnt)
 208{
 209	struct timespec64 now;
 210	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 211
 212	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
 213		return 0;
 214
 215	if ((inode->i_flags & S_NOATIME) ||
 216	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
 217		return 0;
 218
 219	/*
 220	 * We can be called with no vfsmnt structure - NFSD will
 221	 * sometimes do this.
 222	 *
 223	 * Note that our action here is different than touch_atime() -
 224	 * if we can't tell whether this is a noatime mount, then we
 225	 * don't know whether to trust the value of s_atime_quantum.
 226	 */
 227	if (vfsmnt == NULL)
 228		return 0;
 229
 230	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
 231	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
 232		return 0;
 233
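	/* For relatime, update atime only when it is older than mtime or ctime. */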
 234	if (vfsmnt->mnt_flags & MNT_RELATIME) {
 235		struct timespec64 ctime = inode_get_ctime(inode);
 236		struct timespec64 atime = inode_get_atime(inode);
 237		struct timespec64 mtime = inode_get_mtime(inode);
 238
 239		if ((timespec64_compare(&atime, &mtime) <= 0) ||
 240		    (timespec64_compare(&atime, &ctime) <= 0))
 241			return 1;
 242
 243		return 0;
 244	}
 245
 246	now = current_time(inode);
 247	if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
 248		return 0;
 249	else
 250		return 1;
 251}
 252
 253int ocfs2_update_inode_atime(struct inode *inode,
 254			     struct buffer_head *bh)
 255{
 256	int ret;
 257	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 258	handle_t *handle;
 259	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
 260
 261	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 262	if (IS_ERR(handle)) {
 263		ret = PTR_ERR(handle);
 264		mlog_errno(ret);
 265		goto out;
 266	}
 267
 268	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 269				      OCFS2_JOURNAL_ACCESS_WRITE);
 270	if (ret) {
 271		mlog_errno(ret);
 272		goto out_commit;
 273	}
 274
 275	/*
 276	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
 277	 * have i_rwsem to guard against concurrent changes to other
 278	 * inode fields.
 279	 */
 280	inode_set_atime_to_ts(inode, current_time(inode));
 281	di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
 282	di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
 283	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 284	ocfs2_journal_dirty(handle, bh);
 285
 286out_commit:
 287	ocfs2_commit_trans(osb, handle);
 288out:
 289	return ret;
 290}
 291
 292int ocfs2_set_inode_size(handle_t *handle,
 293				struct inode *inode,
 294				struct buffer_head *fe_bh,
 295				u64 new_i_size)
 296{
 297	int status;
 298
 299	i_size_write(inode, new_i_size);
 300	inode->i_blocks = ocfs2_inode_sector_count(inode);
 301	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 302
 303	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 304	if (status < 0) {
 305		mlog_errno(status);
 306		goto bail;
 307	}
 308
 309bail:
 310	return status;
 311}
 312
 313int ocfs2_simple_size_update(struct inode *inode,
 314			     struct buffer_head *di_bh,
 315			     u64 new_i_size)
 316{
 317	int ret;
 318	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 319	handle_t *handle = NULL;
 320
 321	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 322	if (IS_ERR(handle)) {
 323		ret = PTR_ERR(handle);
 324		mlog_errno(ret);
 325		goto out;
 326	}
 327
 328	ret = ocfs2_set_inode_size(handle, inode, di_bh,
 329				   new_i_size);
 330	if (ret < 0)
 331		mlog_errno(ret);
 332
 333	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 334	ocfs2_commit_trans(osb, handle);
 335out:
 336	return ret;
 337}
 338
 339static int ocfs2_cow_file_pos(struct inode *inode,
 340			      struct buffer_head *fe_bh,
 341			      u64 offset)
 342{
 343	int status;
 344	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 345	unsigned int num_clusters = 0;
 346	unsigned int ext_flags = 0;
 347
 348	/*
  349	 * If the new offset is aligned to the start of a cluster, there is
 350	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
 351	 * CoW either.
 352	 */
 353	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
 354		return 0;
 355
 356	status = ocfs2_get_clusters(inode, cpos, &phys,
 357				    &num_clusters, &ext_flags);
 358	if (status) {
 359		mlog_errno(status);
 360		goto out;
 361	}
 362
 363	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 364		goto out;
 365
 366	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 367
 368out:
 369	return status;
 370}
 371
 372static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
 373				     struct inode *inode,
 374				     struct buffer_head *fe_bh,
 375				     u64 new_i_size)
 376{
 377	int status;
 378	handle_t *handle;
 379	struct ocfs2_dinode *di;
 380	u64 cluster_bytes;
 381
 382	/*
  383	 * We need to CoW the cluster that contains the offset if it is reflinked
 384	 * since we will call ocfs2_zero_range_for_truncate later which will
 385	 * write "0" from offset to the end of the cluster.
 386	 */
 387	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
 388	if (status) {
 389		mlog_errno(status);
 390		return status;
 391	}
 392
 393	/* TODO: This needs to actually orphan the inode in this
 394	 * transaction. */
 395
 396	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 397	if (IS_ERR(handle)) {
 398		status = PTR_ERR(handle);
 399		mlog_errno(status);
 400		goto out;
 401	}
 402
 403	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
 404					 OCFS2_JOURNAL_ACCESS_WRITE);
 405	if (status < 0) {
 406		mlog_errno(status);
 407		goto out_commit;
 408	}
 409
 410	/*
 411	 * Do this before setting i_size.
 412	 */
 413	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
 414	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 415					       cluster_bytes);
 416	if (status) {
 417		mlog_errno(status);
 418		goto out_commit;
 419	}
 420
 421	i_size_write(inode, new_i_size);
 422	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 423
 424	di = (struct ocfs2_dinode *) fe_bh->b_data;
 425	di->i_size = cpu_to_le64(new_i_size);
 426	di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
 427	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
 428	ocfs2_update_inode_fsync_trans(handle, inode, 0);
 429
 430	ocfs2_journal_dirty(handle, fe_bh);
 431
 432out_commit:
 433	ocfs2_commit_trans(osb, handle);
 434out:
 435	return status;
 436}
 437
 438int ocfs2_truncate_file(struct inode *inode,
 439			       struct buffer_head *di_bh,
 440			       u64 new_i_size)
 441{
 442	int status = 0;
 443	struct ocfs2_dinode *fe = NULL;
 444	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 445
 446	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
 447	 * already validated it */
 448	fe = (struct ocfs2_dinode *) di_bh->b_data;
 449
 450	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
 451				  (unsigned long long)le64_to_cpu(fe->i_size),
 452				  (unsigned long long)new_i_size);
 453
 454	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
 455			"Inode %llu, inode i_size = %lld != di "
 456			"i_size = %llu, i_flags = 0x%x\n",
 457			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 458			i_size_read(inode),
 459			(unsigned long long)le64_to_cpu(fe->i_size),
 460			le32_to_cpu(fe->i_flags));
 461
 462	if (new_i_size > le64_to_cpu(fe->i_size)) {
 463		trace_ocfs2_truncate_file_error(
 464			(unsigned long long)le64_to_cpu(fe->i_size),
 465			(unsigned long long)new_i_size);
 466		status = -EINVAL;
 467		mlog_errno(status);
 468		goto bail;
 469	}
 470
 471	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 472
 473	ocfs2_resv_discard(&osb->osb_la_resmap,
 474			   &OCFS2_I(inode)->ip_la_data_resv);
 475
 476	/*
 477	 * The inode lock forced other nodes to sync and drop their
 478	 * pages, which (correctly) happens even if we have a truncate
 479	 * without allocation change - ocfs2 cluster sizes can be much
 480	 * greater than page size, so we have to truncate them
 481	 * anyway.
 482	 */
 483
 484	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 485		unmap_mapping_range(inode->i_mapping,
 486				    new_i_size + PAGE_SIZE - 1, 0, 1);
 487		truncate_inode_pages(inode->i_mapping, new_i_size);
 488		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
 489					       i_size_read(inode), 1);
 490		if (status)
 491			mlog_errno(status);
 492
 493		goto bail_unlock_sem;
 494	}
 495
 496	/* alright, we're going to need to do a full blown alloc size
 497	 * change. Orphan the inode so that recovery can complete the
 498	 * truncate if necessary. This does the task of marking
 499	 * i_size. */
 500	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
 501	if (status < 0) {
 502		mlog_errno(status);
 503		goto bail_unlock_sem;
 504	}
 505
 506	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
 507	truncate_inode_pages(inode->i_mapping, new_i_size);
 508
 509	status = ocfs2_commit_truncate(osb, inode, di_bh);
 510	if (status < 0) {
 511		mlog_errno(status);
 512		goto bail_unlock_sem;
 513	}
 514
 515	/* TODO: orphan dir cleanup here. */
 516bail_unlock_sem:
 517	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 518
 519bail:
 520	if (!status && OCFS2_I(inode)->ip_clusters == 0)
 521		status = ocfs2_try_remove_refcount_tree(inode, di_bh);
 522
 523	return status;
 524}
 525
 526/*
 527 * extend file allocation only here.
 528 * we'll update all the disk stuff, and oip->alloc_size
 529 *
 530 * expect stuff to be locked, a transaction started and enough data /
 531 * metadata reservations in the contexts.
 532 *
 533 * Will return -EAGAIN, and a reason if a restart is needed.
 534 * If passed in, *reason will always be set, even in error.
 535 */
 536int ocfs2_add_inode_data(struct ocfs2_super *osb,
 537			 struct inode *inode,
 538			 u32 *logical_offset,
 539			 u32 clusters_to_add,
 540			 int mark_unwritten,
 541			 struct buffer_head *fe_bh,
 542			 handle_t *handle,
 543			 struct ocfs2_alloc_context *data_ac,
 544			 struct ocfs2_alloc_context *meta_ac,
 545			 enum ocfs2_alloc_restarted *reason_ret)
 546{
 547	struct ocfs2_extent_tree et;
 548
 549	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
 550	return ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
 551					   clusters_to_add, mark_unwritten,
 552					   data_ac, meta_ac, reason_ret);
 553}
 554
 555static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
 556				   u32 clusters_to_add, int mark_unwritten)
 557{
 558	int status = 0;
 559	int restart_func = 0;
 560	int credits;
 561	u32 prev_clusters;
 562	struct buffer_head *bh = NULL;
 563	struct ocfs2_dinode *fe = NULL;
 564	handle_t *handle = NULL;
 565	struct ocfs2_alloc_context *data_ac = NULL;
 566	struct ocfs2_alloc_context *meta_ac = NULL;
 567	enum ocfs2_alloc_restarted why = RESTART_NONE;
 568	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 569	struct ocfs2_extent_tree et;
 570	int did_quota = 0;
 571
 572	/*
  573	 * Unwritten extents only exist on file systems which
 574	 * support holes.
 575	 */
 576	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 577
 578	status = ocfs2_read_inode_block(inode, &bh);
 579	if (status < 0) {
 580		mlog_errno(status);
 581		goto leave;
 582	}
 583	fe = (struct ocfs2_dinode *) bh->b_data;
 584
 585restart_all:
 586	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 587
 588	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
 589	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
 590				       &data_ac, &meta_ac);
 591	if (status) {
 592		mlog_errno(status);
 593		goto leave;
 594	}
 595
 596	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
 597	handle = ocfs2_start_trans(osb, credits);
 598	if (IS_ERR(handle)) {
 599		status = PTR_ERR(handle);
 600		handle = NULL;
 601		mlog_errno(status);
 602		goto leave;
 603	}
 604
 605restarted_transaction:
 606	trace_ocfs2_extend_allocation(
 607		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 608		(unsigned long long)i_size_read(inode),
 609		le32_to_cpu(fe->i_clusters), clusters_to_add,
 610		why, restart_func);
 611
 612	status = dquot_alloc_space_nodirty(inode,
 613			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 614	if (status)
 615		goto leave;
 616	did_quota = 1;
 617
  618	/* reserve a write to the file entry early on - so that if we
 619	 * run out of credits in the allocation path, we can still
 620	 * update i_size. */
 621	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 622					 OCFS2_JOURNAL_ACCESS_WRITE);
 623	if (status < 0) {
 624		mlog_errno(status);
 625		goto leave;
 626	}
 627
 628	prev_clusters = OCFS2_I(inode)->ip_clusters;
 629
 630	status = ocfs2_add_inode_data(osb,
 631				      inode,
 632				      &logical_start,
 633				      clusters_to_add,
 634				      mark_unwritten,
 635				      bh,
 636				      handle,
 637				      data_ac,
 638				      meta_ac,
 639				      &why);
 640	if ((status < 0) && (status != -EAGAIN)) {
 641		if (status != -ENOSPC)
 642			mlog_errno(status);
 643		goto leave;
 644	}
 645	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 646	ocfs2_journal_dirty(handle, bh);
 647
 648	spin_lock(&OCFS2_I(inode)->ip_lock);
 649	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
 650	spin_unlock(&OCFS2_I(inode)->ip_lock);
 651	/* Release unused quota reservation */
 652	dquot_free_space(inode,
 653			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 654	did_quota = 0;
 655
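	/*
	 * The extent code may request a restart: RESTART_META means we must
	 * go back and reserve allocators again, while RESTART_TRANS only
	 * needs the transaction extended before retrying.
	 */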
 656	if (why != RESTART_NONE && clusters_to_add) {
 657		if (why == RESTART_META) {
 658			restart_func = 1;
 659			status = 0;
 660		} else {
 661			BUG_ON(why != RESTART_TRANS);
 662
 663			status = ocfs2_allocate_extend_trans(handle, 1);
 664			if (status < 0) {
 665				/* handle still has to be committed at
 666				 * this point. */
 667				status = -ENOMEM;
 668				mlog_errno(status);
 669				goto leave;
 670			}
 671			goto restarted_transaction;
 672		}
 673	}
 674
 675	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
 676	     le32_to_cpu(fe->i_clusters),
 677	     (unsigned long long)le64_to_cpu(fe->i_size),
 678	     OCFS2_I(inode)->ip_clusters,
 679	     (unsigned long long)i_size_read(inode));
 680
 681leave:
 682	if (status < 0 && did_quota)
 683		dquot_free_space(inode,
 684			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
 685	if (handle) {
 686		ocfs2_commit_trans(osb, handle);
 687		handle = NULL;
 688	}
 689	if (data_ac) {
 690		ocfs2_free_alloc_context(data_ac);
 691		data_ac = NULL;
 692	}
 693	if (meta_ac) {
 694		ocfs2_free_alloc_context(meta_ac);
 695		meta_ac = NULL;
 696	}
 697	if ((!status) && restart_func) {
 698		restart_func = 0;
 699		goto restart_all;
 700	}
 701	brelse(bh);
 702	bh = NULL;
 703
 704	return status;
 705}
 706
 707/*
 708 * While a write will already be ordering the data, a truncate will not.
 709 * Thus, we need to explicitly order the zeroed pages.
 710 */
 711static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
 712						      struct buffer_head *di_bh,
 713						      loff_t start_byte,
 714						      loff_t length)
 715{
 716	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 717	handle_t *handle = NULL;
 718	int ret = 0;
 719
 720	if (!ocfs2_should_order_data(inode))
 721		goto out;
 722
 723	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 724	if (IS_ERR(handle)) {
 725		ret = -ENOMEM;
 726		mlog_errno(ret);
 727		goto out;
 728	}
 729
 730	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
 731	if (ret < 0) {
 732		mlog_errno(ret);
 733		goto out;
 734	}
 735
 736	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 737				      OCFS2_JOURNAL_ACCESS_WRITE);
 738	if (ret)
 739		mlog_errno(ret);
 740	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 741
 742out:
 743	if (ret) {
 744		if (!IS_ERR(handle))
 745			ocfs2_commit_trans(osb, handle);
 746		handle = ERR_PTR(ret);
 747	}
 748	return handle;
 749}
 750
 751/* Some parts of this taken from generic_cont_expand, which turned out
 752 * to be too fragile to do exactly what we need without us having to
 753 * worry about recursive locking in ->write_begin() and ->write_end(). */
 754static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 755				 u64 abs_to, struct buffer_head *di_bh)
 756{
 757	struct address_space *mapping = inode->i_mapping;
 758	struct folio *folio;
 759	unsigned long index = abs_from >> PAGE_SHIFT;
 760	handle_t *handle;
 761	int ret = 0;
 762	unsigned zero_from, zero_to, block_start, block_end;
 763	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 764
 765	BUG_ON(abs_from >= abs_to);
 766	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 767	BUG_ON(abs_from & (inode->i_blkbits - 1));
 768
 769	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
 770						      abs_from,
 771						      abs_to - abs_from);
 772	if (IS_ERR(handle)) {
 773		ret = PTR_ERR(handle);
 774		goto out;
 775	}
 776
 777	folio = __filemap_get_folio(mapping, index,
 778			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
 779	if (IS_ERR(folio)) {
 780		ret = PTR_ERR(folio);
 781		mlog_errno(ret);
 782		goto out_commit_trans;
 783	}
 784
 785	/* Get the offsets within the page that we want to zero */
 786	zero_from = abs_from & (PAGE_SIZE - 1);
 787	zero_to = abs_to & (PAGE_SIZE - 1);
 788	if (!zero_to)
 789		zero_to = PAGE_SIZE;
 790
 791	trace_ocfs2_write_zero_page(
 792			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 793			(unsigned long long)abs_from,
 794			(unsigned long long)abs_to,
 795			index, zero_from, zero_to);
 796
 797	/* We know that zero_from is block aligned */
 798	for (block_start = zero_from; block_start < zero_to;
 799	     block_start = block_end) {
 800		block_end = block_start + i_blocksize(inode);
 801
 802		/*
 803		 * block_start is block-aligned.  Bump it by one to force
 804		 * __block_write_begin and block_commit_write to zero the
 805		 * whole block.
 806		 */
 807		ret = __block_write_begin(folio, block_start + 1, 0,
 808					  ocfs2_get_block);
 809		if (ret < 0) {
 810			mlog_errno(ret);
 811			goto out_unlock;
 812		}
 813
 814
 815		/* must not update i_size! */
 816		block_commit_write(&folio->page, block_start + 1, block_start + 1);
 817	}
 818
 819	/*
  820	 * fs-writeback will release dirty pages whose offset is beyond the
  821	 * inode size without taking the page lock; the release happens in
  822	 * block_write_full_folio().
 823	 */
 824	i_size_write(inode, abs_to);
 825	inode->i_blocks = ocfs2_inode_sector_count(inode);
 826	di->i_size = cpu_to_le64((u64)i_size_read(inode));
 827	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 828	di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
 829	di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
 830	di->i_mtime_nsec = di->i_ctime_nsec;
 831	if (handle) {
 832		ocfs2_journal_dirty(handle, di_bh);
 833		ocfs2_update_inode_fsync_trans(handle, inode, 1);
 834	}
 835
 836out_unlock:
 837	folio_unlock(folio);
 838	folio_put(folio);
 839out_commit_trans:
 840	if (handle)
 841		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 842out:
 843	return ret;
 844}
 845
 846/*
 847 * Find the next range to zero.  We do this in terms of bytes because
 848 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 849 * pagecache.  We may return multiple extents.
 850 *
  851 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 852 * needs to be zeroed.  range_start and range_end return the next zeroing
 853 * range.  A subsequent call should pass the previous range_end as its
 854 * zero_start.  If range_end is 0, there's nothing to do.
 855 *
 856 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 857 */
 858static int ocfs2_zero_extend_get_range(struct inode *inode,
 859				       struct buffer_head *di_bh,
 860				       u64 zero_start, u64 zero_end,
 861				       u64 *range_start, u64 *range_end)
 862{
 863	int rc = 0, needs_cow = 0;
 864	u32 p_cpos, zero_clusters = 0;
 865	u32 zero_cpos =
 866		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
 867	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
 868	unsigned int num_clusters = 0;
 869	unsigned int ext_flags = 0;
 870
 871	while (zero_cpos < last_cpos) {
 872		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
 873					&num_clusters, &ext_flags);
 874		if (rc) {
 875			mlog_errno(rc);
 876			goto out;
 877		}
 878
 879		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 880			zero_clusters = num_clusters;
 881			if (ext_flags & OCFS2_EXT_REFCOUNTED)
 882				needs_cow = 1;
 883			break;
 884		}
 885
 886		zero_cpos += num_clusters;
 887	}
 888	if (!zero_clusters) {
 889		*range_end = 0;
 890		goto out;
 891	}
 892
 893	while ((zero_cpos + zero_clusters) < last_cpos) {
 894		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
 895					&p_cpos, &num_clusters,
 896					&ext_flags);
 897		if (rc) {
 898			mlog_errno(rc);
 899			goto out;
 900		}
 901
 902		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
 903			break;
 904		if (ext_flags & OCFS2_EXT_REFCOUNTED)
 905			needs_cow = 1;
 906		zero_clusters += num_clusters;
 907	}
 908	if ((zero_cpos + zero_clusters) > last_cpos)
 909		zero_clusters = last_cpos - zero_cpos;
 910
 911	if (needs_cow) {
 912		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 913					zero_clusters, UINT_MAX);
 914		if (rc) {
 915			mlog_errno(rc);
 916			goto out;
 917		}
 918	}
 919
 920	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
 921	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
 922					     zero_cpos + zero_clusters);
 923
 924out:
 925	return rc;
 926}
 927
 928/*
 929 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 930 * has made sure that the entire range needs zeroing.
 931 */
 932static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 933				   u64 range_end, struct buffer_head *di_bh)
 934{
 935	int rc = 0;
 936	u64 next_pos;
 937	u64 zero_pos = range_start;
 938
 939	trace_ocfs2_zero_extend_range(
 940			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 941			(unsigned long long)range_start,
 942			(unsigned long long)range_end);
 943	BUG_ON(range_start >= range_end);
 944
 945	while (zero_pos < range_end) {
 946		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 947		if (next_pos > range_end)
 948			next_pos = range_end;
 949		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 950		if (rc < 0) {
 951			mlog_errno(rc);
 952			break;
 953		}
 954		zero_pos = next_pos;
 955
 956		/*
 957		 * Very large extends have the potential to lock up
 958		 * the cpu for extended periods of time.
 959		 */
 960		cond_resched();
 961	}
 962
 963	return rc;
 964}
 965
 966int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 967		      loff_t zero_to_size)
 968{
 969	int ret = 0;
 970	u64 zero_start, range_start = 0, range_end = 0;
 971	struct super_block *sb = inode->i_sb;
 972
 973	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
 974	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
 975				(unsigned long long)zero_start,
 976				(unsigned long long)i_size_read(inode));
 977	while (zero_start < zero_to_size) {
 978		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
 979						  zero_to_size,
 980						  &range_start,
 981						  &range_end);
 982		if (ret) {
 983			mlog_errno(ret);
 984			break;
 985		}
 986		if (!range_end)
 987			break;
 988		/* Trim the ends */
 989		if (range_start < zero_start)
 990			range_start = zero_start;
 991		if (range_end > zero_to_size)
 992			range_end = zero_to_size;
 993
 994		ret = ocfs2_zero_extend_range(inode, range_start,
 995					      range_end, di_bh);
 996		if (ret) {
 997			mlog_errno(ret);
 998			break;
 999		}
1000		zero_start = range_end;
1001	}
1002
1003	return ret;
1004}
1005
1006int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1007			  u64 new_i_size, u64 zero_to)
1008{
1009	int ret;
1010	u32 clusters_to_add;
1011	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1012
1013	/*
1014	 * Only quota files call this without a bh, and they can't be
1015	 * refcounted.
1016	 */
1017	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1018	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1019
1020	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1021	if (clusters_to_add < oi->ip_clusters)
1022		clusters_to_add = 0;
1023	else
1024		clusters_to_add -= oi->ip_clusters;
1025
1026	if (clusters_to_add) {
1027		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
1028					      clusters_to_add, 0);
1029		if (ret) {
1030			mlog_errno(ret);
1031			goto out;
1032		}
1033	}
1034
1035	/*
1036	 * Call this even if we don't add any clusters to the tree. We
1037	 * still need to zero the area between the old i_size and the
1038	 * new i_size.
1039	 */
1040	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1041	if (ret < 0)
1042		mlog_errno(ret);
1043
1044out:
1045	return ret;
1046}
1047
1048static int ocfs2_extend_file(struct inode *inode,
1049			     struct buffer_head *di_bh,
1050			     u64 new_i_size)
1051{
1052	int ret = 0;
1053	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1054
1055	BUG_ON(!di_bh);
1056
1057	/* setattr sometimes calls us like this. */
1058	if (new_i_size == 0)
1059		goto out;
1060
1061	if (i_size_read(inode) == new_i_size)
1062		goto out;
1063	BUG_ON(new_i_size < i_size_read(inode));
1064
1065	/*
1066	 * The alloc sem blocks people in read/write from reading our
1067	 * allocation until we're done changing it. We depend on
1068	 * i_rwsem to block other extend/truncate calls while we're
1069	 * here.  We even have to hold it for sparse files because there
1070	 * might be some tail zeroing.
1071	 */
1072	down_write(&oi->ip_alloc_sem);
1073
1074	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1075		/*
1076		 * We can optimize small extends by keeping the inode's
1077		 * inline data.
1078		 */
1079		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1080			up_write(&oi->ip_alloc_sem);
1081			goto out_update_size;
1082		}
1083
1084		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1085		if (ret) {
1086			up_write(&oi->ip_alloc_sem);
1087			mlog_errno(ret);
1088			goto out;
1089		}
1090	}
1091
1092	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1093		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1094	else
1095		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1096					    new_i_size);
1097
1098	up_write(&oi->ip_alloc_sem);
1099
1100	if (ret < 0) {
1101		mlog_errno(ret);
1102		goto out;
1103	}
1104
1105out_update_size:
1106	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1107	if (ret < 0)
1108		mlog_errno(ret);
1109
1110out:
1111	return ret;
1112}
1113
1114int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1115		  struct iattr *attr)
1116{
1117	int status = 0, size_change;
1118	int inode_locked = 0;
1119	struct inode *inode = d_inode(dentry);
1120	struct super_block *sb = inode->i_sb;
1121	struct ocfs2_super *osb = OCFS2_SB(sb);
1122	struct buffer_head *bh = NULL;
1123	handle_t *handle = NULL;
1124	struct dquot *transfer_to[MAXQUOTAS] = { };
1125	int qtype;
1126	int had_lock;
1127	struct ocfs2_lock_holder oh;
1128
1129	trace_ocfs2_setattr(inode, dentry,
1130			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
1131			    dentry->d_name.len, dentry->d_name.name,
1132			    attr->ia_valid,
1133				attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0,
1134				attr->ia_valid & ATTR_UID ?
1135					from_kuid(&init_user_ns, attr->ia_uid) : 0,
1136				attr->ia_valid & ATTR_GID ?
1137					from_kgid(&init_user_ns, attr->ia_gid) : 0);
1138
1139	/* ensuring we don't even attempt to truncate a symlink */
1140	if (S_ISLNK(inode->i_mode))
1141		attr->ia_valid &= ~ATTR_SIZE;
1142
1143#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1144			   | ATTR_GID | ATTR_UID | ATTR_MODE)
1145	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1146		return 0;
1147
1148	status = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1149	if (status)
1150		return status;
1151
1152	if (is_quota_modification(&nop_mnt_idmap, inode, attr)) {
1153		status = dquot_initialize(inode);
1154		if (status)
1155			return status;
1156	}
1157	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1158	if (size_change) {
1159		/*
1160		 * Here we should wait dio to finish before inode lock
1161		 * to avoid a deadlock between ocfs2_setattr() and
1162		 * ocfs2_dio_end_io_write()
1163		 */
1164		inode_dio_wait(inode);
1165
1166		status = ocfs2_rw_lock(inode, 1);
1167		if (status < 0) {
1168			mlog_errno(status);
1169			goto bail;
1170		}
1171	}
1172
1173	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1174	if (had_lock < 0) {
1175		status = had_lock;
1176		goto bail_unlock_rw;
1177	} else if (had_lock) {
1178		/*
1179		 * As far as we know, ocfs2_setattr() could only be the first
1180		 * VFS entry point in the call chain of recursive cluster
1181		 * locking issue.
1182		 *
1183		 * For instance:
1184		 * chmod_common()
1185		 *  notify_change()
1186		 *   ocfs2_setattr()
1187		 *    posix_acl_chmod()
1188		 *     ocfs2_iop_get_acl()
1189		 *
1190		 * But, we're not 100% sure if it's always true, because the
1191		 * ordering of the VFS entry points in the call chain is out
1192		 * of our control. So, we'd better dump the stack here to
1193		 * catch the other cases of recursive locking.
1194		 */
1195		mlog(ML_ERROR, "Another case of recursive locking:\n");
1196		dump_stack();
1197	}
1198	inode_locked = 1;
1199
1200	if (size_change) {
1201		status = inode_newsize_ok(inode, attr->ia_size);
1202		if (status)
1203			goto bail_unlock;
1204
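		/*
		 * A new size at or below the current one takes the truncate
		 * path (with ordered data writeout first, if enabled); a
		 * larger size takes the extend path, which allocates or
		 * zero-extends out to the new i_size.
		 */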
1205		if (i_size_read(inode) >= attr->ia_size) {
1206			if (ocfs2_should_order_data(inode)) {
1207				status = ocfs2_begin_ordered_truncate(inode,
1208								      attr->ia_size);
1209				if (status)
1210					goto bail_unlock;
1211			}
1212			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1213		} else
1214			status = ocfs2_extend_file(inode, bh, attr->ia_size);
1215		if (status < 0) {
1216			if (status != -ENOSPC)
1217				mlog_errno(status);
1218			status = -ENOSPC;
1219			goto bail_unlock;
1220		}
1221	}
1222
1223	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1224	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1225		/*
1226		 * Gather pointers to quota structures so that allocation /
1227		 * freeing of quota structures happens here and not inside
1228		 * dquot_transfer() where we have problems with lock ordering
1229		 */
1230		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1231		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1232		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1233			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1234			if (IS_ERR(transfer_to[USRQUOTA])) {
1235				status = PTR_ERR(transfer_to[USRQUOTA]);
1236				transfer_to[USRQUOTA] = NULL;
1237				goto bail_unlock;
1238			}
1239		}
1240		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1241		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1242		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1243			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1244			if (IS_ERR(transfer_to[GRPQUOTA])) {
1245				status = PTR_ERR(transfer_to[GRPQUOTA]);
1246				transfer_to[GRPQUOTA] = NULL;
1247				goto bail_unlock;
1248			}
1249		}
1250		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1251		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1252					   2 * ocfs2_quota_trans_credits(sb));
1253		if (IS_ERR(handle)) {
1254			status = PTR_ERR(handle);
1255			mlog_errno(status);
1256			goto bail_unlock_alloc;
1257		}
1258		status = __dquot_transfer(inode, transfer_to);
1259		if (status < 0)
1260			goto bail_commit;
1261	} else {
1262		down_write(&OCFS2_I(inode)->ip_alloc_sem);
1263		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1264		if (IS_ERR(handle)) {
1265			status = PTR_ERR(handle);
1266			mlog_errno(status);
1267			goto bail_unlock_alloc;
1268		}
1269	}
1270
1271	setattr_copy(&nop_mnt_idmap, inode, attr);
1272	mark_inode_dirty(inode);
1273
1274	status = ocfs2_mark_inode_dirty(handle, inode, bh);
1275	if (status < 0)
1276		mlog_errno(status);
1277
1278bail_commit:
1279	ocfs2_commit_trans(osb, handle);
1280bail_unlock_alloc:
1281	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1282bail_unlock:
1283	if (status && inode_locked) {
1284		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1285		inode_locked = 0;
1286	}
1287bail_unlock_rw:
1288	if (size_change)
1289		ocfs2_rw_unlock(inode, 1);
1290bail:
1291
1292	/* Release quota pointers in case we acquired them */
1293	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1294		dqput(transfer_to[qtype]);
1295
1296	if (!status && attr->ia_valid & ATTR_MODE) {
1297		status = ocfs2_acl_chmod(inode, bh);
1298		if (status < 0)
1299			mlog_errno(status);
1300	}
1301	if (inode_locked)
1302		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1303
1304	brelse(bh);
1305	return status;
1306}
1307
1308int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
1309		  struct kstat *stat, u32 request_mask, unsigned int flags)
1310{
1311	struct inode *inode = d_inode(path->dentry);
1312	struct super_block *sb = path->dentry->d_sb;
1313	struct ocfs2_super *osb = sb->s_fs_info;
1314	int err;
1315
1316	err = ocfs2_inode_revalidate(path->dentry);
1317	if (err) {
1318		if (err != -ENOENT)
1319			mlog_errno(err);
1320		goto bail;
1321	}
1322
1323	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1324	/*
1325	 * If there is inline data in the inode, the inode will normally not
1326	 * have data blocks allocated (it may have an external xattr block).
1327	 * Report at least one sector for such files, so tools like tar, rsync,
1328	 * and others don't incorrectly think the file is completely sparse.
1329	 */
1330	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1331		stat->blocks += (stat->size + 511)>>9;
1332
1333	/* We set the blksize from the cluster size for performance */
1334	stat->blksize = osb->s_clustersize;
1335
1336bail:
1337	return err;
1338}
1339
1340int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
1341		     int mask)
1342{
1343	int ret, had_lock;
1344	struct ocfs2_lock_holder oh;
1345
1346	if (mask & MAY_NOT_BLOCK)
1347		return -ECHILD;
1348
1349	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1350	if (had_lock < 0) {
1351		ret = had_lock;
1352		goto out;
1353	} else if (had_lock) {
1354		/* See comments in ocfs2_setattr() for details.
1355		 * The call chain of this case could be:
1356		 * do_sys_open()
1357		 *  may_open()
1358		 *   inode_permission()
1359		 *    ocfs2_permission()
1360		 *     ocfs2_iop_get_acl()
1361		 */
1362		mlog(ML_ERROR, "Another case of recursive locking:\n");
1363		dump_stack();
1364	}
1365
1366	ret = generic_permission(&nop_mnt_idmap, inode, mask);
1367
1368	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1369out:
1370	return ret;
1371}
1372
1373static int __ocfs2_write_remove_suid(struct inode *inode,
1374				     struct buffer_head *bh)
1375{
1376	int ret;
1377	handle_t *handle;
1378	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1379	struct ocfs2_dinode *di;
1380
1381	trace_ocfs2_write_remove_suid(
1382			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1383			inode->i_mode);
1384
1385	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1386	if (IS_ERR(handle)) {
1387		ret = PTR_ERR(handle);
1388		mlog_errno(ret);
1389		goto out;
1390	}
1391
1392	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1393				      OCFS2_JOURNAL_ACCESS_WRITE);
1394	if (ret < 0) {
1395		mlog_errno(ret);
1396		goto out_trans;
1397	}
1398
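	/*
	 * Drop the setuid bit unconditionally; drop setgid only when group
	 * execute is set, since setgid without group execute denotes
	 * mandatory locking and must be preserved.
	 */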
1399	inode->i_mode &= ~S_ISUID;
1400	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1401		inode->i_mode &= ~S_ISGID;
1402
1403	di = (struct ocfs2_dinode *) bh->b_data;
1404	di->i_mode = cpu_to_le16(inode->i_mode);
1405	ocfs2_update_inode_fsync_trans(handle, inode, 0);
1406
1407	ocfs2_journal_dirty(handle, bh);
1408
1409out_trans:
1410	ocfs2_commit_trans(osb, handle);
1411out:
1412	return ret;
1413}
1414
1415static int ocfs2_write_remove_suid(struct inode *inode)
1416{
1417	int ret;
1418	struct buffer_head *bh = NULL;
1419
1420	ret = ocfs2_read_inode_block(inode, &bh);
1421	if (ret < 0) {
1422		mlog_errno(ret);
1423		goto out;
1424	}
1425
1426	ret = __ocfs2_write_remove_suid(inode, bh);
1427out:
1428	brelse(bh);
1429	return ret;
1430}
1431
1432/*
1433 * Allocate enough extents to cover the region starting at byte offset
1434 * start for len bytes. Existing extents are skipped; any extents
1435 * added are marked as "unwritten".
1436 */
1437static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1438					    u64 start, u64 len)
1439{
1440	int ret;
1441	u32 cpos, phys_cpos, clusters, alloc_size;
1442	u64 end = start + len;
1443	struct buffer_head *di_bh = NULL;
1444
1445	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1446		ret = ocfs2_read_inode_block(inode, &di_bh);
1447		if (ret) {
1448			mlog_errno(ret);
1449			goto out;
1450		}
1451
1452		/*
1453		 * Nothing to do if the requested reservation range
1454		 * fits within the inode's inline data.
1455		 */
1456		if (ocfs2_size_fits_inline_data(di_bh, end))
1457			goto out;
1458
1459		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1460		if (ret) {
1461			mlog_errno(ret);
1462			goto out;
1463		}
1464	}
1465
1466	/*
1467	 * We consider both start and len to be inclusive.
1468	 */
1469	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1470	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1471	clusters -= cpos;
1472
1473	while (clusters) {
1474		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1475					 &alloc_size, NULL);
1476		if (ret) {
1477			mlog_errno(ret);
1478			goto out;
1479		}
1480
1481		/*
1482		 * Hole or existing extent len can be arbitrary, so
1483		 * cap it to our own allocation request.
1484		 */
1485		if (alloc_size > clusters)
1486			alloc_size = clusters;
1487
1488		if (phys_cpos) {
1489			/*
1490			 * We already have an allocation at this
1491			 * region so we can safely skip it.
1492			 */
1493			goto next;
1494		}
1495
1496		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1497		if (ret) {
1498			if (ret != -ENOSPC)
1499				mlog_errno(ret);
1500			goto out;
1501		}
1502
1503next:
1504		cpos += alloc_size;
1505		clusters -= alloc_size;
1506	}
1507
1508	ret = 0;
1509out:
1510
1511	brelse(di_bh);
1512	return ret;
1513}
1514
1515/*
1516 * Truncate a byte range, avoiding pages within partial clusters. This
1517 * preserves those pages for the zeroing code to write to.
1518 */
1519static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1520					 u64 byte_len)
1521{
1522	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1523	loff_t start, end;
1524	struct address_space *mapping = inode->i_mapping;
1525
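	/*
	 * Round the start up and the end down to cluster boundaries so that
	 * only pages of clusters lying entirely inside the range are dropped;
	 * pages in the partial edge clusters are kept for the zeroing code.
	 */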
1526	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1527	end = byte_start + byte_len;
1528	end = end & ~(osb->s_clustersize - 1);
1529
1530	if (start < end) {
1531		unmap_mapping_range(mapping, start, end - start, 0);
1532		truncate_inode_pages_range(mapping, start, end - 1);
1533	}
1534}
1535
1536/*
1537 * Zero out the partial blocks of one cluster.
1538 *
1539 * start: file offset where zeroing starts; it will be rounded up to a block boundary.
1540 * len: trimmed to the end of the current cluster if "start + len"
1541 *      extends beyond it.
1542 */
1543static int ocfs2_zeroout_partial_cluster(struct inode *inode,
1544					u64 start, u64 len)
1545{
1546	int ret;
1547	u64 start_block, end_block, nr_blocks;
1548	u64 p_block, offset;
1549	u32 cluster, p_cluster, nr_clusters;
1550	struct super_block *sb = inode->i_sb;
1551	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
1552
1553	if (start + len < end)
1554		end = start + len;
1555
1556	start_block = ocfs2_blocks_for_bytes(sb, start);
1557	end_block = ocfs2_blocks_for_bytes(sb, end);
1558	nr_blocks = end_block - start_block;
1559	if (!nr_blocks)
1560		return 0;
1561
1562	cluster = ocfs2_bytes_to_clusters(sb, start);
1563	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
1564				&nr_clusters, NULL);
1565	if (ret)
1566		return ret;
1567	if (!p_cluster)
1568		return 0;
1569
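	/*
	 * Translate the logical range to physical blocks and zero them
	 * directly on disk; callers use this for ranges beyond i_size,
	 * where there is no page cache to go through.
	 */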
1570	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
1571	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
1572	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
1573}
1574
1575static int ocfs2_zero_partial_clusters(struct inode *inode,
1576				       u64 start, u64 len)
1577{
1578	int ret = 0;
1579	u64 tmpend = 0;
1580	u64 end = start + len;
1581	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1582	unsigned int csize = osb->s_clustersize;
1583	handle_t *handle;
1584	loff_t isize = i_size_read(inode);
1585
1586	/*
1587	 * The "start" and "end" values are NOT necessarily part of
1588	 * the range whose allocation is being deleted. Rather, this
1589	 * is what the user passed in with the request. We must zero
1590	 * partial clusters here. There's no need to worry about
1591	 * physical allocation - the zeroing code knows to skip holes.
1592	 */
1593	trace_ocfs2_zero_partial_clusters(
1594		(unsigned long long)OCFS2_I(inode)->ip_blkno,
1595		(unsigned long long)start, (unsigned long long)end);
1596
1597	/*
1598	 * If both edges are on a cluster boundary then there's no
1599	 * zeroing required as the region is part of the allocation to
1600	 * be truncated.
1601	 */
1602	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1603		goto out;
1604
1605	/* No page cache for EOF blocks, issue zero out to disk. */
1606	if (end > isize) {
1607		/*
1608		 * Zero out the blocks beyond EOF in the last cluster, starting
1609		 * from "isize" even when "start" > "isize": zeroing exactly
1610		 * at "start" would be complicated because "start" may not be
1611		 * aligned to the block size, so a buffered write would be
1612		 * needed to do that, and buffered writes beyond EOF are not
1613		 * supported.
1614		 */
1615		ret = ocfs2_zeroout_partial_cluster(inode, isize,
1616					end - isize);
1617		if (ret) {
1618			mlog_errno(ret);
1619			goto out;
1620		}
1621		if (start >= isize)
1622			goto out;
1623		end = isize;
1624	}
1625	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1626	if (IS_ERR(handle)) {
1627		ret = PTR_ERR(handle);
1628		mlog_errno(ret);
1629		goto out;
1630	}
1631
1632	/*
1633	 * If start is on a cluster boundary and end is somewhere in another
1634	 * cluster, we have not COWed the cluster starting at start, unless
1635	 * end is also within the same cluster. So, in this case, we skip this
1636	 * first call to ocfs2_zero_range_for_truncate() and move on
1637	 * to the next one.
1638	 */
1639	if ((start & (csize - 1)) != 0) {
1640		/*
1641		 * We want to get the byte offset of the end of the 1st
1642		 * cluster.
1643		 */
1644		tmpend = (u64)osb->s_clustersize +
1645			(start & ~(osb->s_clustersize - 1));
1646		if (tmpend > end)
1647			tmpend = end;
1648
1649		trace_ocfs2_zero_partial_clusters_range1(
1650			(unsigned long long)start,
1651			(unsigned long long)tmpend);
1652
1653		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1654						    tmpend);
1655		if (ret)
1656			mlog_errno(ret);
1657	}
1658
1659	if (tmpend < end) {
1660		/*
1661		 * This may make start and end equal, but the zeroing
1662		 * code will skip any work in that case so there's no
1663		 * need to catch it up here.
1664		 */
1665		start = end & ~(osb->s_clustersize - 1);
1666
1667		trace_ocfs2_zero_partial_clusters_range2(
1668			(unsigned long long)start, (unsigned long long)end);
1669
1670		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1671		if (ret)
1672			mlog_errno(ret);
1673	}
1674	ocfs2_update_inode_fsync_trans(handle, inode, 1);
1675
1676	ocfs2_commit_trans(osb, handle);
1677out:
1678	return ret;
1679}
1680
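/*
 * Return the index of the rightmost extent record in @el whose cpos lies
 * below @pos, or -1 if every record starts at or beyond @pos.
 */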
1681static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1682{
1683	int i;
1684	struct ocfs2_extent_rec *rec = NULL;
1685
1686	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1687
1688		rec = &el->l_recs[i];
1689
1690		if (le32_to_cpu(rec->e_cpos) < pos)
1691			break;
1692	}
1693
1694	return i;
1695}
1696
1697/*
1698 * Helper to calculate the punching position and length in one run. We
1699 * handle the following three cases in order:
1700 *
1701 * - remove the entire record
1702 * - remove a partial record
1703 * - no record needs to be removed (hole-punching completed)
1704 */
1705static void ocfs2_calc_trunc_pos(struct inode *inode,
1706				 struct ocfs2_extent_list *el,
1707				 struct ocfs2_extent_rec *rec,
1708				 u32 trunc_start, u32 *trunc_cpos,
1709				 u32 *trunc_len, u32 *trunc_end,
1710				 u64 *blkno, int *done)
1711{
1712	int ret = 0;
1713	u32 coff, range;
1714
1715	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1716
1717	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1718		/*
1719		 * remove an entire extent record.
1720		 */
1721		*trunc_cpos = le32_to_cpu(rec->e_cpos);
1722		/*
1723		 * Skip holes if any.
1724		 */
1725		if (range < *trunc_end)
1726			*trunc_end = range;
1727		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1728		*blkno = le64_to_cpu(rec->e_blkno);
1729		*trunc_end = le32_to_cpu(rec->e_cpos);
1730	} else if (range > trunc_start) {
1731		/*
1732		 * remove a partial extent record, which means we're
1733		 * removing the last extent record.
1734		 */
1735		*trunc_cpos = trunc_start;
1736		/*
1737		 * skip hole if any.
1738		 */
1739		if (range < *trunc_end)
1740			*trunc_end = range;
1741		*trunc_len = *trunc_end - trunc_start;
1742		coff = trunc_start - le32_to_cpu(rec->e_cpos);
1743		*blkno = le64_to_cpu(rec->e_blkno) +
1744				ocfs2_clusters_to_blocks(inode->i_sb, coff);
1745		*trunc_end = trunc_start;
1746	} else {
1747		/*
1748		 * It may have two following possibilities:
1749		 *
1750		 * - last record has been removed
1751		 * - trunc_start was within a hole
1752		 *
1753		 * Either case means hole punching is complete.
1754		 */
1755		ret = 1;
1756	}
1757
1758	*done = ret;
1759}
1760
1761int ocfs2_remove_inode_range(struct inode *inode,
1762			     struct buffer_head *di_bh, u64 byte_start,
1763			     u64 byte_len)
1764{
1765	int ret = 0, flags = 0, done = 0, i;
1766	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1767	u32 cluster_in_el;
1768	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1769	struct ocfs2_cached_dealloc_ctxt dealloc;
1770	struct address_space *mapping = inode->i_mapping;
1771	struct ocfs2_extent_tree et;
1772	struct ocfs2_path *path = NULL;
1773	struct ocfs2_extent_list *el = NULL;
1774	struct ocfs2_extent_rec *rec = NULL;
1775	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1776	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1777
1778	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1779	ocfs2_init_dealloc_ctxt(&dealloc);
1780
1781	trace_ocfs2_remove_inode_range(
1782			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1783			(unsigned long long)byte_start,
1784			(unsigned long long)byte_len);
1785
1786	if (byte_len == 0)
1787		return 0;
1788
1789	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1790		int id_count = ocfs2_max_inline_data_with_xattr(inode->i_sb, di);
1791
1792		if (byte_start > id_count || byte_start + byte_len > id_count) {
1793			ret = -EINVAL;
1794			mlog_errno(ret);
1795			goto out;
1796		}
1797
1798		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1799					    byte_start + byte_len, 0);
1800		if (ret) {
1801			mlog_errno(ret);
1802			goto out;
1803		}
1804		/*
1805		 * There's no need to get fancy with the page cache
1806		 * truncate of an inline-data inode. We're talking
1807		 * about less than a page here, which will be cached
1808		 * in the dinode buffer anyway.
1809		 */
1810		unmap_mapping_range(mapping, 0, 0, 0);
1811		truncate_inode_pages(mapping, 0);
1812		goto out;
1813	}
1814
1815	/*
1816	 * For reflinked files we may need to CoW the two clusters containing
1817	 * the hole's start and end offsets, since they may be partially
1818	 * zeroed later when those offsets are not cluster aligned.
1819	 */
1820
1821	if (ocfs2_is_refcount_inode(inode)) {
1822		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1823		if (ret) {
1824			mlog_errno(ret);
1825			goto out;
1826		}
1827
1828		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1829		if (ret) {
1830			mlog_errno(ret);
1831			goto out;
1832		}
1833	}
1834
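	/*
	 * trunc_start rounds byte_start up and trunc_end rounds the end of
	 * the range down, so only whole clusters in between get deallocated;
	 * the partial clusters at either edge are zeroed instead.
	 */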
1835	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1836	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1837	cluster_in_el = trunc_end;
1838
1839	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1840	if (ret) {
1841		mlog_errno(ret);
1842		goto out;
1843	}
1844
1845	path = ocfs2_new_path_from_et(&et);
1846	if (!path) {
1847		ret = -ENOMEM;
1848		mlog_errno(ret);
1849		goto out;
1850	}
1851
1852	while (trunc_end > trunc_start) {
1853
1854		ret = ocfs2_find_path(INODE_CACHE(inode), path,
1855				      cluster_in_el);
1856		if (ret) {
1857			mlog_errno(ret);
1858			goto out;
1859		}
1860
1861		el = path_leaf_el(path);
1862
1863		i = ocfs2_find_rec(el, trunc_end);
1864		/*
1865		 * Need to go to previous extent block.
1866		 */
1867		if (i < 0) {
1868			if (path->p_tree_depth == 0)
1869				break;
1870
1871			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1872							    path,
1873							    &cluster_in_el);
1874			if (ret) {
1875				mlog_errno(ret);
1876				goto out;
1877			}
1878
1879			/*
1880			 * We've reached the leftmost extent block;
1881			 * it's safe to leave.
1882			 */
1883			if (cluster_in_el == 0)
1884				break;
1885
1886			/*
1887			 * The 'pos' searched for previous extent block is
1888			 * always one cluster less than actual trunc_end.
1889			 */
1890			trunc_end = cluster_in_el + 1;
1891
1892			ocfs2_reinit_path(path, 1);
1893
1894			continue;
1895
1896		} else
1897			rec = &el->l_recs[i];
1898
1899		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1900				     &trunc_len, &trunc_end, &blkno, &done);
1901		if (done)
1902			break;
1903
1904		flags = rec->e_flags;
1905		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1906
1907		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1908					       phys_cpos, trunc_len, flags,
1909					       &dealloc, refcount_loc, false);
1910		if (ret < 0) {
1911			mlog_errno(ret);
1912			goto out;
1913		}
1914
1915		cluster_in_el = trunc_end;
1916
1917		ocfs2_reinit_path(path, 1);
1918	}
1919
1920	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1921
1922out:
1923	ocfs2_free_path(path);
1924	ocfs2_schedule_truncate_log_flush(osb, 1);
1925	ocfs2_run_deallocs(osb, &dealloc);
1926
1927	return ret;
1928}
1929
1930/*
1931 * Parts of this function taken from xfs_change_file_space()
1932 */
1933static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1934				     loff_t f_pos, unsigned int cmd,
1935				     struct ocfs2_space_resv *sr,
1936				     int change_size)
1937{
1938	int ret;
1939	s64 llen;
1940	loff_t size, orig_isize;
1941	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1942	struct buffer_head *di_bh = NULL;
1943	handle_t *handle;
1944	unsigned long long max_off = inode->i_sb->s_maxbytes;
1945
1946	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1947		return -EROFS;
1948
1949	inode_lock(inode);
1950
1951	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
1952	inode_dio_wait(inode);
1953	/*
1954	 * This prevents concurrent writes on other nodes
1955	 */
1956	ret = ocfs2_rw_lock(inode, 1);
1957	if (ret) {
1958		mlog_errno(ret);
1959		goto out;
1960	}
1961
1962	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1963	if (ret) {
1964		mlog_errno(ret);
1965		goto out_rw_unlock;
1966	}
1967
1968	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1969		ret = -EPERM;
1970		goto out_inode_unlock;
1971	}
1972
1973	switch (sr->l_whence) {
1974	case 0: /*SEEK_SET*/
1975		break;
1976	case 1: /*SEEK_CUR*/
1977		sr->l_start += f_pos;
1978		break;
1979	case 2: /*SEEK_END*/
1980		sr->l_start += i_size_read(inode);
1981		break;
1982	default:
1983		ret = -EINVAL;
1984		goto out_inode_unlock;
1985	}
1986	sr->l_whence = 0;
1987
1988	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1989
1990	if (sr->l_start < 0
1991	    || sr->l_start > max_off
1992	    || (sr->l_start + llen) < 0
1993	    || (sr->l_start + llen) > max_off) {
1994		ret = -EINVAL;
1995		goto out_inode_unlock;
1996	}
1997	size = sr->l_start + sr->l_len;
1998
1999	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
2000	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
2001		if (sr->l_len <= 0) {
2002			ret = -EINVAL;
2003			goto out_inode_unlock;
2004		}
2005	}
2006
2007	if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) {
2008		ret = __ocfs2_write_remove_suid(inode, di_bh);
2009		if (ret) {
2010			mlog_errno(ret);
2011			goto out_inode_unlock;
2012		}
2013	}
2014
2015	down_write(&OCFS2_I(inode)->ip_alloc_sem);
2016	switch (cmd) {
2017	case OCFS2_IOC_RESVSP:
2018	case OCFS2_IOC_RESVSP64:
2019		/*
2020		 * This takes unsigned offsets, but the signed ones we
2021		 * pass have been checked against overflow above.
2022		 */
2023		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
2024						       sr->l_len);
2025		break;
2026	case OCFS2_IOC_UNRESVSP:
2027	case OCFS2_IOC_UNRESVSP64:
2028		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
2029					       sr->l_len);
2030		break;
2031	default:
2032		ret = -EINVAL;
2033	}
2034
2035	orig_isize = i_size_read(inode);
2036	/* zeroout eof blocks in the cluster. */
2037	if (!ret && change_size && orig_isize < size) {
2038		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
2039					size - orig_isize);
2040		if (!ret)
2041			i_size_write(inode, size);
2042	}
2043	up_write(&OCFS2_I(inode)->ip_alloc_sem);
2044	if (ret) {
2045		mlog_errno(ret);
2046		goto out_inode_unlock;
2047	}
2048
2049	/*
2050	 * We update c/mtime for these changes
2051	 */
2052	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2053	if (IS_ERR(handle)) {
2054		ret = PTR_ERR(handle);
2055		mlog_errno(ret);
2056		goto out_inode_unlock;
2057	}
2058
2059	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2060	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2061	if (ret < 0)
2062		mlog_errno(ret);
2063
2064	if (file && (file->f_flags & O_SYNC))
2065		handle->h_sync = 1;
2066
2067	ocfs2_commit_trans(osb, handle);
2068
2069out_inode_unlock:
2070	brelse(di_bh);
2071	ocfs2_inode_unlock(inode, 1);
2072out_rw_unlock:
2073	ocfs2_rw_unlock(inode, 1);
2074
2075out:
2076	inode_unlock(inode);
2077	return ret;
2078}
2079
2080int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2081			    struct ocfs2_space_resv *sr)
2082{
2083	struct inode *inode = file_inode(file);
2084	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2085	int ret;
2086
2087	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2088	    !ocfs2_writes_unwritten_extents(osb))
2089		return -ENOTTY;
2090	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2091		 !ocfs2_sparse_alloc(osb))
2092		return -ENOTTY;
2093
2094	if (!S_ISREG(inode->i_mode))
2095		return -EINVAL;
2096
2097	if (!(file->f_mode & FMODE_WRITE))
2098		return -EBADF;
2099
2100	ret = mnt_want_write_file(file);
2101	if (ret)
2102		return ret;
2103	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2104	mnt_drop_write_file(file);
2105	return ret;
2106}
2107
2108static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2109			    loff_t len)
2110{
2111	struct inode *inode = file_inode(file);
2112	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2113	struct ocfs2_space_resv sr;
2114	int change_size = 1;
2115	int cmd = OCFS2_IOC_RESVSP64;
2116	int ret = 0;
2117
2118	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2119		return -EOPNOTSUPP;
2120	if (!ocfs2_writes_unwritten_extents(osb))
2121		return -EOPNOTSUPP;
2122
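	/*
	 * Plain preallocation maps onto the RESVSP64 path below and hole
	 * punching onto UNRESVSP64; FALLOC_FL_KEEP_SIZE means i_size is left
	 * untouched even if we allocate past it.
	 */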
2123	if (mode & FALLOC_FL_KEEP_SIZE) {
2124		change_size = 0;
2125	} else {
2126		ret = inode_newsize_ok(inode, offset + len);
2127		if (ret)
2128			return ret;
2129	}
2130
2131	if (mode & FALLOC_FL_PUNCH_HOLE)
2132		cmd = OCFS2_IOC_UNRESVSP64;
2133
2134	sr.l_whence = 0;
2135	sr.l_start = (s64)offset;
2136	sr.l_len = (s64)len;
2137
2138	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2139					 change_size);
2140}
2141
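/*
 * Return 1 if any extent in the byte range [pos, pos + count) is marked
 * refcounted (shared), 0 if none is, or a negative error code. Writers
 * use this to decide whether CoW is needed before the write proceeds.
 */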
2142int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2143				   size_t count)
2144{
2145	int ret = 0;
2146	unsigned int extent_flags;
2147	u32 cpos, clusters, extent_len, phys_cpos;
2148	struct super_block *sb = inode->i_sb;
2149
2150	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2151	    !ocfs2_is_refcount_inode(inode) ||
2152	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2153		return 0;
2154
2155	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2156	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2157
2158	while (clusters) {
2159		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2160					 &extent_flags);
2161		if (ret < 0) {
2162			mlog_errno(ret);
2163			goto out;
2164		}
2165
2166		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2167			ret = 1;
2168			break;
2169		}
2170
2171		if (extent_len > clusters)
2172			extent_len = clusters;
2173
2174		clusters -= extent_len;
2175		cpos += extent_len;
2176	}
2177out:
2178	return ret;
2179}
2180
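/*
 * An I/O is unaligned if either end of it falls inside a filesystem block.
 * Unaligned direct AIO is forced to complete synchronously in
 * ocfs2_file_write_iter().
 */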
2181static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2182{
2183	int blockmask = inode->i_sb->s_blocksize - 1;
2184	loff_t final_size = pos + count;
2185
2186	if ((pos & blockmask) || (final_size & blockmask))
2187		return 1;
2188	return 0;
2189}
2190
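/*
 * Take the cluster inode lock at @meta_level and then ip_alloc_sem (shared
 * or exclusive depending on @write_sem). When @wait is zero both are taken
 * with trylocks and -EAGAIN is returned on contention, which backs the
 * IOCB_NOWAIT path.
 */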
2191static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2192					    struct buffer_head **di_bh,
2193					    int meta_level,
2194					    int write_sem,
2195					    int wait)
2196{
2197	int ret = 0;
2198
2199	if (wait)
2200		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
2201	else
2202		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
2203	if (ret < 0)
2204		goto out;
2205
2206	if (wait) {
2207		if (write_sem)
2208			down_write(&OCFS2_I(inode)->ip_alloc_sem);
2209		else
2210			down_read(&OCFS2_I(inode)->ip_alloc_sem);
2211	} else {
2212		if (write_sem)
2213			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2214		else
2215			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2216
2217		if (!ret) {
2218			ret = -EAGAIN;
2219			goto out_unlock;
2220		}
2221	}
2222
2223	return ret;
2224
2225out_unlock:
2226	brelse(*di_bh);
2227	*di_bh = NULL;
2228	ocfs2_inode_unlock(inode, meta_level);
2229out:
2230	return ret;
2231}
2232
2233static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2234					       struct buffer_head **di_bh,
2235					       int meta_level,
2236					       int write_sem)
2237{
2238	if (write_sem)
2239		up_write(&OCFS2_I(inode)->ip_alloc_sem);
2240	else
2241		up_read(&OCFS2_I(inode)->ip_alloc_sem);
2242
2243	brelse(*di_bh);
2244	*di_bh = NULL;
2245
2246	if (meta_level >= 0)
2247		ocfs2_inode_unlock(inode, meta_level);
2248}
2249
2250static int ocfs2_prepare_inode_for_write(struct file *file,
2251					 loff_t pos, size_t count, int wait)
2252{
2253	int ret = 0, meta_level = 0, overwrite_io = 0;
2254	int write_sem = 0;
2255	struct dentry *dentry = file->f_path.dentry;
2256	struct inode *inode = d_inode(dentry);
2257	struct buffer_head *di_bh = NULL;
2258	u32 cpos;
2259	u32 clusters;
2260
2261	/*
2262	 * We start with a read level meta lock and only jump to an ex
2263	 * if we need to make modifications here.
2264	 */
2265	for(;;) {
2266		ret = ocfs2_inode_lock_for_extent_tree(inode,
2267						       &di_bh,
2268						       meta_level,
2269						       write_sem,
2270						       wait);
2271		if (ret < 0) {
2272			if (ret != -EAGAIN)
2273				mlog_errno(ret);
2274			goto out;
2275		}
2276
2277		/*
2278		 * Check if IO will overwrite allocated blocks in case
2279		 * IOCB_NOWAIT flag is set.
2280		 */
2281		if (!wait && !overwrite_io) {
2282			overwrite_io = 1;
2283
2284			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2285			if (ret < 0) {
2286				if (ret != -EAGAIN)
2287					mlog_errno(ret);
2288				goto out_unlock;
2289			}
2290		}
2291
2292		/* Clear suid / sgid if necessary. We do this here
2293		 * instead of later in the write path because
2294		 * remove_suid() calls ->setattr without any hint that
2295		 * we may have already done our cluster locking. Since
2296		 * ocfs2_setattr() *must* take cluster locks to
2297		 * proceed, this will lead us to recursively lock the
2298		 * inode. There's also the dinode i_size state which
2299		 * can be lost via setattr during extending writes (we
2300		 * set inode->i_size at the end of a write). */
2301		if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) {
2302			if (meta_level == 0) {
2303				ocfs2_inode_unlock_for_extent_tree(inode,
2304								   &di_bh,
2305								   meta_level,
2306								   write_sem);
2307				meta_level = 1;
2308				continue;
2309			}
2310
2311			ret = ocfs2_write_remove_suid(inode);
2312			if (ret < 0) {
2313				mlog_errno(ret);
2314				goto out_unlock;
2315			}
2316		}
2317
2318		ret = ocfs2_check_range_for_refcount(inode, pos, count);
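		/*
		 * If the write range overlaps refcounted (shared) extents,
		 * retake the locks with an exclusive meta lock and
		 * ip_alloc_sem held for write, then CoW the affected
		 * clusters before the write goes ahead.
		 */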
2319		if (ret == 1) {
2320			ocfs2_inode_unlock_for_extent_tree(inode,
2321							   &di_bh,
2322							   meta_level,
2323							   write_sem);
2324			meta_level = 1;
2325			write_sem = 1;
2326			ret = ocfs2_inode_lock_for_extent_tree(inode,
2327							       &di_bh,
2328							       meta_level,
2329							       write_sem,
2330							       wait);
2331			if (ret < 0) {
2332				if (ret != -EAGAIN)
2333					mlog_errno(ret);
2334				goto out;
2335			}
2336
2337			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2338			clusters =
2339				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2340			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2341		}
2342
2343		if (ret < 0) {
2344			if (ret != -EAGAIN)
2345				mlog_errno(ret);
2346			goto out_unlock;
2347		}
2348
2349		break;
2350	}
2351
2352out_unlock:
2353	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2354					    pos, count, wait);
2355
2356	ocfs2_inode_unlock_for_extent_tree(inode,
2357					   &di_bh,
2358					   meta_level,
2359					   write_sem);
2360
2361out:
2362	return ret;
2363}
2364
2365static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2366				    struct iov_iter *from)
2367{
2368	int rw_level;
2369	ssize_t written = 0;
2370	ssize_t ret;
2371	size_t count = iov_iter_count(from);
2372	struct file *file = iocb->ki_filp;
2373	struct inode *inode = file_inode(file);
2374	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2375	int full_coherency = !(osb->s_mount_opt &
2376			       OCFS2_MOUNT_COHERENCY_BUFFERED);
2377	void *saved_ki_complete = NULL;
2378	int append_write = ((iocb->ki_pos + count) >=
2379			i_size_read(inode) ? 1 : 0);
2380	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2381	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2382
2383	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2384		(unsigned long long)OCFS2_I(inode)->ip_blkno,
2385		file->f_path.dentry->d_name.len,
2386		file->f_path.dentry->d_name.name,
2387		(unsigned int)from->nr_segs);	/* GRRRRR */
2388
2389	if (!direct_io && nowait)
2390		return -EOPNOTSUPP;
2391
2392	if (count == 0)
2393		return 0;
2394
2395	if (nowait) {
2396		if (!inode_trylock(inode))
2397			return -EAGAIN;
2398	} else
2399		inode_lock(inode);
2400
2401	ocfs2_iocb_init_rw_locked(iocb);
2402
2403	/*
2404	 * Concurrent O_DIRECT writes are allowed with
2405	 * the mount option "coherency=buffered".
2406	 * For an append write, we must take the rw lock EX.
2407	 */
2408	rw_level = (!direct_io || full_coherency || append_write);
2409
2410	if (nowait)
2411		ret = ocfs2_try_rw_lock(inode, rw_level);
2412	else
2413		ret = ocfs2_rw_lock(inode, rw_level);
2414	if (ret < 0) {
2415		if (ret != -EAGAIN)
2416			mlog_errno(ret);
2417		goto out_mutex;
2418	}
2419
2420	/*
2421	 * O_DIRECT writes with "coherency=full" need to take EX cluster
2422	 * inode_lock to guarantee coherency.
2423	 */
2424	if (direct_io && full_coherency) {
2425		/*
2426		 * We need to take and drop the inode lock to force
2427		 * other nodes to drop their caches.  Buffered I/O
2428		 * already does this in write_begin().
2429		 */
2430		if (nowait)
2431			ret = ocfs2_try_inode_lock(inode, NULL, 1);
2432		else
2433			ret = ocfs2_inode_lock(inode, NULL, 1);
2434		if (ret < 0) {
2435			if (ret != -EAGAIN)
2436				mlog_errno(ret);
2437			goto out;
2438		}
2439
2440		ocfs2_inode_unlock(inode, 1);
2441	}
2442
2443	ret = generic_write_checks(iocb, from);
2444	if (ret <= 0) {
2445		if (ret)
2446			mlog_errno(ret);
2447		goto out;
2448	}
2449	count = ret;
2450
2451	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2452	if (ret < 0) {
2453		if (ret != -EAGAIN)
2454			mlog_errno(ret);
2455		goto out;
2456	}
2457
2458	if (direct_io && !is_sync_kiocb(iocb) &&
2459	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2460		/*
2461		 * Make it a sync io if it's an unaligned aio.
2462		 */
2463		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2464	}
2465
2466	/* communicate with ocfs2_dio_end_io */
2467	ocfs2_iocb_set_rw_locked(iocb, rw_level);
2468
2469	written = __generic_file_write_iter(iocb, from);
2470	/* buffered aio wouldn't have proper lock coverage today */
2471	BUG_ON(written == -EIOCBQUEUED && !direct_io);
2472
2473	/*
2474	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2475	 * function pointer which is called when O_DIRECT I/O completes so that
2476	 * it can unlock our rw lock.
2477	 * Unfortunately there are error cases which call end_io and others
2478	 * that don't, so we don't have to unlock the rw_lock if either an
2479	 * async dio is going to do it in the future or an end_io after an
2480	 * error has already done it.
2481	 */
2482	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2483		rw_level = -1;
2484	}
2485
2486	if (unlikely(written <= 0))
2487		goto out;
2488
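	/*
	 * A buffered O_DSYNC write, or any write to an IS_SYNC inode, still
	 * needs flushing here: write out the dirty range, force a journal
	 * commit, then wait for the data before returning.
	 */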
2489	if (((file->f_flags & O_DSYNC) && !direct_io) ||
2490	    IS_SYNC(inode)) {
2491		ret = filemap_fdatawrite_range(file->f_mapping,
2492					       iocb->ki_pos - written,
2493					       iocb->ki_pos - 1);
2494		if (ret < 0)
2495			written = ret;
2496
2497		if (!ret) {
2498			ret = jbd2_journal_force_commit(osb->journal->j_journal);
2499			if (ret < 0)
2500				written = ret;
2501		}
2502
2503		if (!ret)
2504			ret = filemap_fdatawait_range(file->f_mapping,
2505						      iocb->ki_pos - written,
2506						      iocb->ki_pos - 1);
2507	}
2508
2509out:
2510	if (saved_ki_complete)
2511		xchg(&iocb->ki_complete, saved_ki_complete);
2512
2513	if (rw_level != -1)
2514		ocfs2_rw_unlock(inode, rw_level);
2515
2516out_mutex:
2517	inode_unlock(inode);
2518
2519	if (written)
2520		ret = written;
2521	return ret;
2522}
2523
2524static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2525				   struct iov_iter *to)
2526{
2527	int ret = 0, rw_level = -1, lock_level = 0;
2528	struct file *filp = iocb->ki_filp;
2529	struct inode *inode = file_inode(filp);
2530	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2531	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2532
2533	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2534			(unsigned long long)OCFS2_I(inode)->ip_blkno,
2535			filp->f_path.dentry->d_name.len,
2536			filp->f_path.dentry->d_name.name,
2537			to->nr_segs);	/* GRRRRR */
2538
2539
2540	if (!inode) {
2541		ret = -EINVAL;
2542		mlog_errno(ret);
2543		goto bail;
2544	}
2545
2546	if (!direct_io && nowait)
2547		return -EOPNOTSUPP;
2548
2549	ocfs2_iocb_init_rw_locked(iocb);
2550
2551	/*
2552	 * buffered reads protect themselves in ->read_folio().  O_DIRECT reads
2553	 * need locks to protect pending reads from racing with truncate.
2554	 */
2555	if (direct_io) {
2556		if (nowait)
2557			ret = ocfs2_try_rw_lock(inode, 0);
2558		else
2559			ret = ocfs2_rw_lock(inode, 0);
2560
2561		if (ret < 0) {
2562			if (ret != -EAGAIN)
2563				mlog_errno(ret);
2564			goto bail;
2565		}
2566		rw_level = 0;
2567		/* communicate with ocfs2_dio_end_io */
2568		ocfs2_iocb_set_rw_locked(iocb, rw_level);
2569	}
2570
2571	/*
2572	 * We're fine letting folks race truncates and extending
2573	 * writes with read across the cluster, just like they can
2574	 * locally. Hence no rw_lock during read.
2575	 *
2576	 * Take and drop the meta data lock to update inode fields
2577	 * like i_size. This gives the checks further down in
2578	 * copy_splice_read() a chance of actually working.
2579	 */
2580	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2581				     !nowait);
2582	if (ret < 0) {
2583		if (ret != -EAGAIN)
2584			mlog_errno(ret);
2585		goto bail;
2586	}
2587	ocfs2_inode_unlock(inode, lock_level);
2588
2589	ret = generic_file_read_iter(iocb, to);
2590	trace_generic_file_read_iter_ret(ret);
2591
2592	/* buffered aio wouldn't have proper lock coverage today */
2593	BUG_ON(ret == -EIOCBQUEUED && !direct_io);
2594
2595	/* see ocfs2_file_write_iter */
2596	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2597		rw_level = -1;
2598	}
2599
2600bail:
2601	if (rw_level != -1)
2602		ocfs2_rw_unlock(inode, rw_level);
2603
2604	return ret;
2605}
2606
2607static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos,
2608				      struct pipe_inode_info *pipe,
2609				      size_t len, unsigned int flags)
2610{
2611	struct inode *inode = file_inode(in);
2612	ssize_t ret = 0;
2613	int lock_level = 0;
2614
2615	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2616				     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2617				     in->f_path.dentry->d_name.len,
2618				     in->f_path.dentry->d_name.name,
2619				     flags);
2620
2621	/*
2622	 * We're fine letting folks race truncates and extending writes with
2623	 * read across the cluster, just like they can locally.  Hence no
2624	 * rw_lock during read.
2625	 *
2626	 * Take and drop the meta data lock to update inode fields like i_size.
2627	 * This gives the checks further down in filemap_splice_read() a
2628	 * chance of actually working.
2629	 */
2630	ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1);
2631	if (ret < 0) {
2632		if (ret != -EAGAIN)
2633			mlog_errno(ret);
2634		goto bail;
2635	}
2636	ocfs2_inode_unlock(inode, lock_level);
2637
2638	ret = filemap_splice_read(in, ppos, pipe, len, flags);
2639	trace_filemap_splice_read_ret(ret);
2640bail:
2641	return ret;
2642}
2643
2644/* See generic_file_llseek_unlocked() */
2645static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2646{
2647	struct inode *inode = file->f_mapping->host;
2648	int ret = 0;
2649
2650	inode_lock(inode);
2651
2652	switch (whence) {
2653	case SEEK_SET:
2654		break;
2655	case SEEK_END:
2656		/* SEEK_END requires the OCFS2 inode lock for the file
2657		 * because it references the file's size.
2658		 */
2659		ret = ocfs2_inode_lock(inode, NULL, 0);
2660		if (ret < 0) {
2661			mlog_errno(ret);
2662			goto out;
2663		}
2664		offset += i_size_read(inode);
2665		ocfs2_inode_unlock(inode, 0);
2666		break;
2667	case SEEK_CUR:
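		/*
		 * lseek(fd, 0, SEEK_CUR) is a pure position query; report
		 * f_pos without rewriting it.
		 */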
2668		if (offset == 0) {
2669			offset = file->f_pos;
2670			goto out;
2671		}
2672		offset += file->f_pos;
2673		break;
2674	case SEEK_DATA:
2675	case SEEK_HOLE:
2676		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2677		if (ret)
2678			goto out;
2679		break;
2680	default:
2681		ret = -EINVAL;
2682		goto out;
2683	}
2684
2685	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2686
2687out:
2688	inode_unlock(inode);
2689	if (ret)
2690		return ret;
2691	return offset;
2692}
2693
2694static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
2695				     struct file *file_out, loff_t pos_out,
2696				     loff_t len, unsigned int remap_flags)
2697{
2698	struct inode *inode_in = file_inode(file_in);
2699	struct inode *inode_out = file_inode(file_out);
2700	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
2701	struct buffer_head *in_bh = NULL, *out_bh = NULL;
2702	bool same_inode = (inode_in == inode_out);
2703	loff_t remapped = 0;
2704	ssize_t ret;
2705
2706	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2707		return -EINVAL;
2708	if (!ocfs2_refcount_tree(osb))
2709		return -EOPNOTSUPP;
2710	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
2711		return -EROFS;
2712
2713	/* Lock both files against IO */
2714	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
2715	if (ret)
2716		return ret;
2717
2718	/* Check file eligibility and prepare for block sharing. */
2719	ret = -EINVAL;
2720	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
2721	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
2722		goto out_unlock;
2723
2724	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
2725			&len, remap_flags);
2726	if (ret < 0 || len == 0)
2727		goto out_unlock;
2728
2729	/* Lock out changes to the allocation maps and remap. */
2730	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2731	if (!same_inode)
2732		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
2733				  SINGLE_DEPTH_NESTING);
2734
2735	/* Zap any page cache for the destination file's range. */
2736	truncate_inode_pages_range(&inode_out->i_data,
2737				   round_down(pos_out, PAGE_SIZE),
2738				   round_up(pos_out + len, PAGE_SIZE) - 1);
2739
2740	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
2741			inode_out, out_bh, pos_out, len);
2742	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2743	if (!same_inode)
2744		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
2745	if (remapped < 0) {
2746		ret = remapped;
2747		mlog_errno(ret);
2748		goto out_unlock;
2749	}
2750
2751	/*
2752	 * Empty the extent map so that we may get the right extent
2753	 * record from the disk.
2754	 */
2755	ocfs2_extent_map_trunc(inode_in, 0);
2756	ocfs2_extent_map_trunc(inode_out, 0);
2757
2758	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
2759	if (ret) {
2760		mlog_errno(ret);
2761		goto out_unlock;
2762	}
2763
2764out_unlock:
2765	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
2766	return remapped > 0 ? remapped : ret;
2767}
2768
2769static loff_t ocfs2_dir_llseek(struct file *file, loff_t offset, int whence)
2770{
2771	struct ocfs2_file_private *fp = file->private_data;
2772
2773	return generic_llseek_cookie(file, offset, whence, &fp->cookie);
2774}
2775
2776const struct inode_operations ocfs2_file_iops = {
2777	.setattr	= ocfs2_setattr,
2778	.getattr	= ocfs2_getattr,
2779	.permission	= ocfs2_permission,
2780	.listxattr	= ocfs2_listxattr,
2781	.fiemap		= ocfs2_fiemap,
2782	.get_inode_acl	= ocfs2_iop_get_acl,
2783	.set_acl	= ocfs2_iop_set_acl,
2784	.fileattr_get	= ocfs2_fileattr_get,
2785	.fileattr_set	= ocfs2_fileattr_set,
2786};
2787
2788const struct inode_operations ocfs2_special_file_iops = {
2789	.setattr	= ocfs2_setattr,
2790	.getattr	= ocfs2_getattr,
2791	.listxattr	= ocfs2_listxattr,
2792	.permission	= ocfs2_permission,
2793	.get_inode_acl	= ocfs2_iop_get_acl,
2794	.set_acl	= ocfs2_iop_set_acl,
2795};
2796
2797/*
2798 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2799 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2800 */
2801const struct file_operations ocfs2_fops = {
2802	.llseek		= ocfs2_file_llseek,
2803	.mmap		= ocfs2_mmap,
2804	.fsync		= ocfs2_sync_file,
2805	.release	= ocfs2_file_release,
2806	.open		= ocfs2_file_open,
2807	.read_iter	= ocfs2_file_read_iter,
2808	.write_iter	= ocfs2_file_write_iter,
2809	.unlocked_ioctl	= ocfs2_ioctl,
2810#ifdef CONFIG_COMPAT
2811	.compat_ioctl   = ocfs2_compat_ioctl,
2812#endif
2813	.lock		= ocfs2_lock,
2814	.flock		= ocfs2_flock,
2815	.splice_read	= ocfs2_file_splice_read,
2816	.splice_write	= iter_file_splice_write,
2817	.fallocate	= ocfs2_fallocate,
2818	.remap_file_range = ocfs2_remap_file_range,
2819	.fop_flags	= FOP_ASYNC_LOCK,
2820};
2821
2822WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
2823const struct file_operations ocfs2_dops = {
2824	.llseek		= ocfs2_dir_llseek,
2825	.read		= generic_read_dir,
2826	.iterate_shared	= shared_ocfs2_readdir,
2827	.fsync		= ocfs2_sync_file,
2828	.release	= ocfs2_dir_release,
2829	.open		= ocfs2_dir_open,
2830	.unlocked_ioctl	= ocfs2_ioctl,
2831#ifdef CONFIG_COMPAT
2832	.compat_ioctl   = ocfs2_compat_ioctl,
2833#endif
2834	.lock		= ocfs2_lock,
2835	.flock		= ocfs2_flock,
2836	.fop_flags	= FOP_ASYNC_LOCK,
2837};
2838
2839/*
2840 * POSIX-lockless variants of our file_operations.
2841 *
2842 * These will be used if the underlying cluster stack does not support
2843 * posix file locking, if the user passes the "localflocks" mount
2844 * option, or if we have a local-only fs.
2845 *
2846 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2847 * so we still want it in the case of no stack support for
2848 * plocks. Internally, it will do the right thing when asked to ignore
2849 * the cluster.
2850 */
2851const struct file_operations ocfs2_fops_no_plocks = {
2852	.llseek		= ocfs2_file_llseek,
2853	.mmap		= ocfs2_mmap,
2854	.fsync		= ocfs2_sync_file,
2855	.release	= ocfs2_file_release,
2856	.open		= ocfs2_file_open,
2857	.read_iter	= ocfs2_file_read_iter,
2858	.write_iter	= ocfs2_file_write_iter,
2859	.unlocked_ioctl	= ocfs2_ioctl,
2860#ifdef CONFIG_COMPAT
2861	.compat_ioctl   = ocfs2_compat_ioctl,
2862#endif
2863	.flock		= ocfs2_flock,
2864	.splice_read	= filemap_splice_read,
2865	.splice_write	= iter_file_splice_write,
2866	.fallocate	= ocfs2_fallocate,
2867	.remap_file_range = ocfs2_remap_file_range,
2868};
2869
2870const struct file_operations ocfs2_dops_no_plocks = {
2871	.llseek		= ocfs2_dir_llseek,
2872	.read		= generic_read_dir,
2873	.iterate_shared	= shared_ocfs2_readdir,
2874	.fsync		= ocfs2_sync_file,
2875	.release	= ocfs2_dir_release,
2876	.open		= ocfs2_dir_open,
2877	.unlocked_ioctl	= ocfs2_ioctl,
2878#ifdef CONFIG_COMPAT
2879	.compat_ioctl   = ocfs2_compat_ioctl,
2880#endif
2881	.flock		= ocfs2_flock,
2882};