v4.17
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
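
/*
 * A minimal sketch of how callers are expected to drive this helper
 * (hypothetical usage, mirroring xfs_file_fallocate() later in this file):
 *
 *	enum xfs_prealloc_flags	flags = XFS_PREALLOC_SET;
 *
 *	if (file->f_flags & O_DSYNC)
 *		flags |= XFS_PREALLOC_SYNC;
 *	error = xfs_update_prealloc_flags(ip, flags);
 */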

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
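
/*
 * The datasync argument above maps to the fdatasync(2) vs fsync(2) split in
 * userspace.  A hypothetical sketch of the overwrite case the last comment
 * describes, where fdatasync() can be satisfied by a data cache flush alone:
 *
 *	int fd = open("file", O_WRONLY);	// already-allocated blocks
 *	pwrite(fd, buf, len, 0);		// overwrite in place
 *	fdatasync(fd);				// no size/metadata change
 */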

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
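
/*
 * The dispatch above means a plain read(2) takes the buffered path, while
 * opening with O_DIRECT selects xfs_file_dio_aio_read().  A hypothetical
 * userspace sketch; direct I/O buffers, offsets and lengths generally need
 * sector alignment (see the explicit check in xfs_file_dio_aio_write()):
 *
 *	void	*buf;
 *	int	fd = open("file", O_RDONLY | O_DIRECT);
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	pread(fd, buf, 4096, 0);
 */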

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count))
				return -EAGAIN;
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
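
/*
 * A userspace caller can stay on the IOLOCK_SHARED fast path above by
 * keeping direct writes filesystem-block aligned.  Hypothetical sketch of
 * the equivalent alignment test, assuming st_blksize reflects the
 * filesystem block size:
 *
 *	struct stat	st;
 *
 *	fstat(fd, &st);
 *	if ((pos % st.st_blksize) == 0 && (len % st.st_blksize) == 0)
 *		;	// eligible for shared-lock, parallel submission
 */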

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
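
/*
 * Hypothetical userspace sketch exercising two of the modes handled above
 * (note that FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// preallocate 1MiB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 0, 65536);	// punch a hole
 */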

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	struct inode	*srci = file_inode(src_file);
	u64		max_dedupe;
	int		error;

	/*
	 * Since we have to read all these pages in to compare them, cut
	 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
	 * That means we won't do more than MAX_RW_COUNT IO per request.
	 */
	max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
	if (len > max_dedupe)
		len = max_dedupe;
	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
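
/*
 * SEEK_HOLE/SEEK_DATA above let userspace walk the layout of a sparse
 * file.  A hypothetical sketch that enumerates the data segments; lseek()
 * fails with ENXIO once the offset passes the last data extent:
 *
 *	off_t	data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data: %lld..%lld\n", (long long)data,
 *		       (long long)hole - 1);
 *		data = hole;
 *	}
 */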

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static int
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing so handle it as a standard write fault.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{

	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
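
/*
 * The VM_SYNC check above pairs with MAP_SYNC mappings, which only make
 * sense on DAX files.  Hypothetical userspace sketch; on a non-DAX file
 * this mmap() fails with EOPNOTSUPP as implemented above:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 */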

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_file.h"

#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	if (!is_power_of_2(alloc_unit))
		return isaligned_64(pos, alloc_unit) &&
		       isaligned_64(len, alloc_unit);

	return !((pos | len) & (alloc_unit - 1));
}
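
/*
 * The power-of-2 fast path above works because OR-combining pos and len
 * before masking is aligned iff both inputs are.  Worked example with a
 * 4096-byte allocation unit:
 *
 *	(8192 | 4096) & 4095 == 0	(both aligned)
 *	(8192 | 4100) & 4095 != 0	(len unaligned)
 */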

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error, err2;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}

static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

static int
xfs_ilock_iocb_for_write(
	struct kiocb		*iocb,
	unsigned int		*lock_mode)
{
	ssize_t			ret;
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	ret = xfs_ilock_iocb(iocb, *lock_mode);
	if (ret)
		return ret;

	/*
	 * If a reflink remap is in progress we always need to take the iolock
	 * exclusively to wait for it to finish.
	 */
	if (*lock_mode == XFS_IOLOCK_SHARED &&
	    xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, *lock_mode);
		*lock_mode = XFS_IOLOCK_EXCL;
		return xfs_ilock_iocb(iocb, *lock_mode);
	}

	return 0;
}

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*in,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			len,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(in);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	trace_xfs_file_splice_read(ip, *ppos, len);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = filemap_splice_read(in, ppos, pipe, len, flags);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Take care of zeroing post-EOF blocks when they might exist.
 *
 * Returns 0 on success, a negative error for a failure, or 1 if this
 * function dropped the iolock and reacquired it exclusively and the caller
 * needs to restart the write sanity checks.
 */
static ssize_t
xfs_file_write_zero_eof(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock,
	size_t			count,
	bool			*drained_dio)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	loff_t			isize;
	int			error;

	/*
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while
	 * we do this check until we have placed an IO barrier (i.e. hold
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(VFS_I(ip));
	if (iocb->ki_pos <= isize) {
		spin_unlock(&ip->i_flags_lock);
		return 0;
	}
	spin_unlock(&ip->i_flags_lock);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	if (!*drained_dio) {
		/*
		 * If zeroing is needed and we are currently holding the iolock
		 * shared, we need to update it to exclusive which implies
		 * having to redo all checks before.
		 */
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_ilock(ip, *iolock);
			iov_iter_reexpand(from, count);
		}

		/*
		 * We now have an IO submission barrier in place, but AIO can do
		 * EOF updates during IO completion and hence we now need to
		 * wait for all of them to drain.  Non-AIO DIO will have drained
		 * before we are given the XFS_IOLOCK_EXCL, and so for most
		 * cases this wait is a no-op.
		 */
		inode_dio_wait(VFS_I(ip));
		*drained_dio = true;
		return 1;
	}

	trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);

	return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	ssize_t			error;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(XFS_I(inode), *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero all
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 *
	 * We can do an unlocked check for i_size here safely as I/O completion
	 * can only extend EOF.  Truncate is locked out at this point, so the
	 * EOF can not move backwards, only forwards. Hence we only need to take
	 * the slow path when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos > i_size_read(inode)) {
		error = xfs_file_write_zero_eof(iocb, from, iolock, count,
				&drained_dio);
		if (error == 1)
			goto restart;
		if (error)
			return error;
	}

	return kiocb_modified(iocb);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes
 */
 588static noinline ssize_t
 589xfs_file_dio_write_aligned(
 590	struct xfs_inode	*ip,
 591	struct kiocb		*iocb,
 592	struct iov_iter		*from)
 593{
 594	unsigned int		iolock = XFS_IOLOCK_SHARED;
 595	ssize_t			ret;
 
 
 
 
 
 
 
 
 
 596
 597	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 598	if (ret)
 599		return ret;
 600	ret = xfs_file_write_checks(iocb, from, &iolock);
 601	if (ret)
 602		goto out_unlock;
 603
 604	/*
 605	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
 606	 * the iolock back to shared if we had to take the exclusive lock in
 607	 * xfs_file_write_checks() for other reasons.
 608	 */
 609	if (iolock == XFS_IOLOCK_EXCL) {
 610		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611		iolock = XFS_IOLOCK_SHARED;
 612	}
 613	trace_xfs_file_direct_write(iocb, from);
 614	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 615			   &xfs_dio_write_ops, 0, NULL, 0);
 616out_unlock:
 617	if (iolock)
 618		xfs_iunlock(ip, iolock);
 619	return ret;
 620}
 621
 622/*
 623 * Handle block unaligned direct I/O writes
 624 *
 625 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 626 * them to be done in parallel with reads and other direct I/O writes.  However,
 627 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 628 * to do sub-block zeroing and that requires serialisation against other direct
 629 * I/O to the same block.  In this case we need to serialise the submission of
 630 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 631 * In the case where sub-block zeroing is not required, we can do concurrent
 632 * sub-block dios to the same block successfully.
 633 *
 634 * Optimistically submit the I/O using the shared lock first, but use the
 635 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 636 * if block allocation or partial block zeroing would be required.  In that case
 637 * we try again with the exclusive lock.
 638 */
 639static noinline ssize_t
 640xfs_file_dio_write_unaligned(
 641	struct xfs_inode	*ip,
 642	struct kiocb		*iocb,
 643	struct iov_iter		*from)
 644{
 645	size_t			isize = i_size_read(VFS_I(ip));
 646	size_t			count = iov_iter_count(from);
 647	unsigned int		iolock = XFS_IOLOCK_SHARED;
 648	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
 649	ssize_t			ret;
 650
 651	/*
 652	 * Extending writes need exclusivity because of the sub-block zeroing
 653	 * that the DIO code always does for partial tail blocks beyond EOF, so
 654	 * don't even bother trying the fast path in this case.
 655	 */
 656	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
 657		if (iocb->ki_flags & IOCB_NOWAIT)
 658			return -EAGAIN;
 659retry_exclusive:
 660		iolock = XFS_IOLOCK_EXCL;
 661		flags = IOMAP_DIO_FORCE_WAIT;
 662	}
 663
 664	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 665	if (ret)
 666		return ret;
 667
 668	/*
 669	 * We can't properly handle unaligned direct I/O to reflink files yet,
 670	 * as we can't unshare a partial block.
 671	 */
 672	if (xfs_is_cow_inode(ip)) {
 673		trace_xfs_reflink_bounce_dio_write(iocb, from);
 674		ret = -ENOTBLK;
 675		goto out_unlock;
 676	}
 677
 678	ret = xfs_file_write_checks(iocb, from, &iolock);
 679	if (ret)
 680		goto out_unlock;
 681
 682	/*
 683	 * If we are doing exclusive unaligned I/O, this must be the only I/O
 684	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
 685	 * conversions from the AIO end_io handler.  Wait for all other I/O to
 686	 * drain first.
 687	 */
 688	if (flags & IOMAP_DIO_FORCE_WAIT)
 689		inode_dio_wait(VFS_I(ip));
 690
 691	trace_xfs_file_direct_write(iocb, from);
 692	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 693			   &xfs_dio_write_ops, flags, NULL, 0);
 694
 695	/*
 696	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
 697	 * layer rejected it for mapping or locking reasons. If we are doing
 698	 * nonblocking user I/O, propagate the error.
 699	 */
 700	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
 701		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
 702		xfs_iunlock(ip, iolock);
 703		goto retry_exclusive;
 704	}
 705
 706out_unlock:
 707	if (iolock)
 708		xfs_iunlock(ip, iolock);
 709	return ret;
 710}
 711
 712static ssize_t
 713xfs_file_dio_write(
 714	struct kiocb		*iocb,
 715	struct iov_iter		*from)
 716{
 717	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 718	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
 719	size_t			count = iov_iter_count(from);
 720
 721	/* direct I/O must be aligned to device logical sector size */
 722	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 723		return -EINVAL;
 724	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
 725		return xfs_file_dio_write_unaligned(ip, iocb, from);
 726	return xfs_file_dio_write_aligned(ip, iocb, from);
 727}
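
/*
 * Illustrative userspace sketch, not part of the original source: an
 * O_DIRECT write that passes the alignment checks above.  The offset,
 * length, and buffer address are all aligned; 4096 is an assumed
 * multiple of the device logical sector size.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int example_dio_write(const char *path)
{
	const size_t len = 4096;	/* assumed sector-size multiple */
	void *buf = NULL;
	ssize_t written;
	int fd;

	fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, len, len)) {
		close(fd);
		return -1;
	}
	memset(buf, 0xab, len);
	/* A misaligned offset or length would be rejected with EINVAL. */
	written = pwrite(fd, buf, len, 0);
	free(buf);
	close(fd);
	return written == (ssize_t)len ? 0 : -1;
}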
 728
 729static noinline ssize_t
 730xfs_file_dax_write(
 731	struct kiocb		*iocb,
 732	struct iov_iter		*from)
 733{
 734	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 735	struct xfs_inode	*ip = XFS_I(inode);
 736	unsigned int		iolock = XFS_IOLOCK_EXCL;
 737	ssize_t			ret, error = 0;
 738	loff_t			pos;
 739
 740	ret = xfs_ilock_iocb(iocb, iolock);
 741	if (ret)
 742		return ret;
 743	ret = xfs_file_write_checks(iocb, from, &iolock);
 744	if (ret)
 745		goto out;
 746
 747	pos = iocb->ki_pos;
 748
 749	trace_xfs_file_dax_write(iocb, from);
 750	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
 751	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 752		i_size_write(inode, iocb->ki_pos);
 753		error = xfs_setfilesize(ip, pos, ret);
 754	}
 755out:
 756	if (iolock)
 757		xfs_iunlock(ip, iolock);
 758	if (error)
 759		return error;
 760
 761	if (ret > 0) {
 762		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 763
 764		/* Handle various SYNC-type writes */
 765		ret = generic_write_sync(iocb, ret);
 766	}
 767	return ret;
 768}
 769
 770STATIC ssize_t
 771xfs_file_buffered_write(
 772	struct kiocb		*iocb,
 773	struct iov_iter		*from)
 774{
 775	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 776	struct xfs_inode	*ip = XFS_I(inode);
 777	ssize_t			ret;
 778	bool			cleared_space = false;
 779	unsigned int		iolock;
 780
 781write_retry:
 782	iolock = XFS_IOLOCK_EXCL;
 783	ret = xfs_ilock_iocb(iocb, iolock);
 784	if (ret)
 785		return ret;
 786
 787	ret = xfs_file_write_checks(iocb, from, &iolock);
 788	if (ret)
 789		goto out;
 790
 791	trace_xfs_file_buffered_write(iocb, from);
 792	ret = iomap_file_buffered_write(iocb, from,
 793			&xfs_buffered_write_iomap_ops, NULL);
 794
 795	/*
 796	 * If we hit a space limit, try to free up some lingering preallocated
 797	 * space before returning an error. In the case of ENOSPC, first try to
 798	 * write back all dirty inodes to free up some of the excess reserved
 799	 * metadata space. This reduces the chances that the eofblocks scan
 800	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 801	 * also behaves as a filter to prevent too many eofblocks scans from
 802	 * running at the same time.  Use a synchronous scan to increase its
 803	 * effectiveness.
 804	 */
 805	if (ret == -EDQUOT && !cleared_space) {
 806		xfs_iunlock(ip, iolock);
 807		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 808		cleared_space = true;
 809		goto write_retry;
 810	} else if (ret == -ENOSPC && !cleared_space) {
 811		struct xfs_icwalk	icw = {0};
 812
 813		cleared_space = true;
 814		xfs_flush_inodes(ip->i_mount);
 815
 816		xfs_iunlock(ip, iolock);
 817		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
 818		xfs_blockgc_free_space(ip->i_mount, &icw);
 819		goto write_retry;
 820	}
 821
 822out:
 823	if (iolock)
 824		xfs_iunlock(ip, iolock);
 825
 826	if (ret > 0) {
 827		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 828		/* Handle various SYNC-type writes */
 829		ret = generic_write_sync(iocb, ret);
 830	}
 831	return ret;
 832}
 833
 834STATIC ssize_t
 835xfs_file_write_iter(
 836	struct kiocb		*iocb,
 837	struct iov_iter		*from)
 838{
 839	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 840	struct xfs_inode	*ip = XFS_I(inode);
 841	ssize_t			ret;
 842	size_t			ocount = iov_iter_count(from);
 843
 844	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 845
 846	if (ocount == 0)
 847		return 0;
 848
 849	if (xfs_is_shutdown(ip->i_mount))
 850		return -EIO;
 851
 852	if (IS_DAX(inode))
 853		return xfs_file_dax_write(iocb, from);
 854
 855	if (iocb->ki_flags & IOCB_ATOMIC) {
 856		/*
 857		 * Currently only atomic writing of a single FS block is
 858		 * supported. It would be possible to support atomic writes smaller
 859		 * than a FS block, but there is no requirement to do so.
 860		 * Note that iomap also does not support this yet.
 861		 */
 862		if (ocount != ip->i_mount->m_sb.sb_blocksize)
 863			return -EINVAL;
 864		ret = generic_atomic_write_valid(iocb, from);
 865		if (ret)
 866			return ret;
 867	}
 868
 869	if (iocb->ki_flags & IOCB_DIRECT) {
 870		/*
 871		 * Allow a directio write to fall back to a buffered
 872		 * write *only* in the case that we're doing a reflink
 873		 * CoW.  In all other directio scenarios we do not
 874		 * allow an operation to fall back to buffered mode.
 875		 */
 876		ret = xfs_file_dio_write(iocb, from);
 877		if (ret != -ENOTBLK)
 878			return ret;
 879	}
 880
 881	return xfs_file_buffered_write(iocb, from);
 882}
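
/*
 * Illustrative userspace sketch, not part of the original source: an
 * atomic write sized to exactly one filesystem block, as the
 * IOCB_ATOMIC check above requires.  RWF_ATOMIC needs kernel and libc
 * headers recent enough to define it, and the position should be
 * block-aligned as well; otherwise expect -EINVAL.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>

ssize_t
example_atomic_write(int fd, const void *blk, size_t blksize, off_t pos)
{
	struct iovec iov = {
		.iov_base	= (void *)blk,
		.iov_len	= blksize,	/* must equal sb_blocksize */
	};

	return pwritev2(fd, &iov, 1, pos, RWF_ATOMIC);
}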
 883
 884/* Does this file, inode, or mount want synchronous writes? */
 885static inline bool xfs_file_sync_writes(struct file *filp)
 886{
 887	struct xfs_inode	*ip = XFS_I(file_inode(filp));
 888
 889	if (xfs_has_wsync(ip->i_mount))
 890		return true;
 891	if (filp->f_flags & (__O_SYNC | O_DSYNC))
 892		return true;
 893	if (IS_SYNC(file_inode(filp)))
 894		return true;
 895
 896	return false;
 897}
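
/*
 * Illustrative userspace sketch, not part of the original source: a file
 * opened with O_DSYNC makes xfs_file_sync_writes() return true, so a
 * successful fallocate() on it is followed by a log force (see
 * xfs_file_fallocate() below).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int example_sync_prealloc(const char *path)
{
	int fd, ret;

	fd = open(path, O_WRONLY | O_DSYNC);
	if (fd < 0)
		return -1;
	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
	close(fd);
	return ret;
}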
 898
 899static int
 900xfs_falloc_newsize(
 901	struct file		*file,
 902	int			mode,
 903	loff_t			offset,
 904	loff_t			len,
 905	loff_t			*new_size)
 906{
 907	struct inode		*inode = file_inode(file);
 908
 909	if ((mode & FALLOC_FL_KEEP_SIZE) || offset + len <= i_size_read(inode))
 910		return 0;
 911	*new_size = offset + len;
 912	return inode_newsize_ok(inode, *new_size);
 913}
 914
 915static int
 916xfs_falloc_setsize(
 917	struct file		*file,
 918	loff_t			new_size)
 919{
 920	struct iattr iattr = {
 921		.ia_valid	= ATTR_SIZE,
 922		.ia_size	= new_size,
 923	};
 924
 925	if (!new_size)
 926		return 0;
 927	return xfs_vn_setattr_size(file_mnt_idmap(file), file_dentry(file),
 928			&iattr);
 929}
 930
 931static int
 932xfs_falloc_collapse_range(
 933	struct file		*file,
 934	loff_t			offset,
 935	loff_t			len)
 936{
 937	struct inode		*inode = file_inode(file);
 938	loff_t			new_size = i_size_read(inode) - len;
 939	int			error;
 940
 941	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
 942		return -EINVAL;
 943
 944	/*
 945	 * There is no need to support collapsing a range that overlaps EOF;
 946	 * in that case the operation is effectively a truncate.
 947	 */
 948	if (offset + len >= i_size_read(inode))
 949		return -EINVAL;
 950
 951	error = xfs_collapse_file_space(XFS_I(inode), offset, len);
 952	if (error)
 953		return error;
 954	return xfs_falloc_setsize(file, new_size);
 955}
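
/*
 * Illustrative userspace sketch, not part of the original source: a
 * collapse-range call that satisfies the checks above -- offset and
 * length aligned to the filesystem block size and the range strictly
 * below EOF.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <sys/stat.h>
#include <sys/vfs.h>

int example_collapse(int fd, off_t off, off_t len)
{
	struct statfs sfs;
	struct stat st;

	if (fstatfs(fd, &sfs) || fstat(fd, &st))
		return -1;
	/* Unaligned or EOF-overlapping ranges are rejected with EINVAL. */
	if ((off | len) % sfs.f_bsize || off + len >= st.st_size)
		return -1;
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);
}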
 956
 957static int
 958xfs_falloc_insert_range(
 959	struct file		*file,
 960	loff_t			offset,
 961	loff_t			len)
 962{
 963	struct inode		*inode = file_inode(file);
 964	loff_t			isize = i_size_read(inode);
 965	int			error;
 966
 967	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
 968		return -EINVAL;
 969
 970	/*
 971	 * New inode size must not exceed ->s_maxbytes, accounting for
 972	 * possible signed overflow.
 973	 */
 974	if (inode->i_sb->s_maxbytes - isize < len)
 975		return -EFBIG;
 976
 977	/* Offset should be less than i_size */
 978	if (offset >= isize)
 979		return -EINVAL;
 980
 981	error = xfs_falloc_setsize(file, isize + len);
 982	if (error)
 983		return error;
 984
 985	/*
 986	 * Perform hole insertion now that the file size has been updated so
 987	 * that if we crash during the operation we don't leave shifted extents
 988	 * past EOF and hence lose access to the data that is contained
 989	 * within them.
 990	 */
 991	return xfs_insert_file_space(XFS_I(inode), offset, len);
 992}
 993
 994/*
 995 * Punch a hole and prealloc the range.  We use a hole punch rather than
 996 * unwritten extent conversion for two reasons:
 997 *
 998 *   1.) Hole punch handles partial block zeroing for us.
 999 *   2.) If prealloc returns ENOSPC, the file range is still zero-valued by
1000 *	 virtue of the hole punch.
1001 */
1002static int
1003xfs_falloc_zero_range(
1004	struct file		*file,
1005	int			mode,
1006	loff_t			offset,
1007	loff_t			len)
1008{
1009	struct inode		*inode = file_inode(file);
1010	unsigned int		blksize = i_blocksize(inode);
1011	loff_t			new_size = 0;
1012	int			error;
1013
1014	trace_xfs_zero_file_space(XFS_I(inode));
1015
1016	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1017	if (error)
1018		return error;
1019
1020	error = xfs_free_file_space(XFS_I(inode), offset, len);
1021	if (error)
1022		return error;
1023
1024	len = round_up(offset + len, blksize) - round_down(offset, blksize);
1025	offset = round_down(offset, blksize);
1026	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1027	if (error)
1028		return error;
1029	return xfs_falloc_setsize(file, new_size);
1030}
1031
1032static int
1033xfs_falloc_unshare_range(
1034	struct file		*file,
1035	int			mode,
1036	loff_t			offset,
1037	loff_t			len)
1038{
1039	struct inode		*inode = file_inode(file);
1040	loff_t			new_size = 0;
1041	int			error;
1042
1043	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1044	if (error)
1045		return error;
1046
1047	error = xfs_reflink_unshare(XFS_I(inode), offset, len);
1048	if (error)
1049		return error;
1050
1051	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1052	if (error)
1053		return error;
1054	return xfs_falloc_setsize(file, new_size);
1055}
1056
1057static int
1058xfs_falloc_allocate_range(
1059	struct file		*file,
1060	int			mode,
1061	loff_t			offset,
1062	loff_t			len)
1063{
1064	struct inode		*inode = file_inode(file);
1065	loff_t			new_size = 0;
1066	int			error;
1067
1068	/*
1069	 * In always_cow mode we can't use preallocations and thus should not
1070	 * create them.
1071	 */
1072	if (xfs_is_always_cow_inode(XFS_I(inode)))
1073		return -EOPNOTSUPP;
1074
1075	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1076	if (error)
1077		return error;
1078
1079	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1080	if (error)
1081		return error;
1082	return xfs_falloc_setsize(file, new_size);
1083}
1084
1085#define	XFS_FALLOC_FL_SUPPORTED						\
1086		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
1087		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
1088		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
1089
1090STATIC long
1091xfs_file_fallocate(
1092	struct file		*file,
1093	int			mode,
1094	loff_t			offset,
1095	loff_t			len)
1096{
1097	struct inode		*inode = file_inode(file);
1098	struct xfs_inode	*ip = XFS_I(inode);
1099	long			error;
1100	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1101
1102	if (!S_ISREG(inode->i_mode))
1103		return -EINVAL;
1104	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
1105		return -EOPNOTSUPP;
1106
1107	xfs_ilock(ip, iolock);
1108	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
1109	if (error)
1110		goto out_unlock;
1111
1112	/*
1113	 * Must wait for all AIO to complete before we continue as AIO can
1114	 * change the file size on completion without holding any locks we
1115	 * currently hold. We must do this first because AIO can update both
1116	 * the on-disk and in-memory inode sizes, and the operations that follow
1117	 * require the in-memory size to be fully up-to-date.
1118	 */
1119	inode_dio_wait(inode);
1120
1121	error = file_modified(file);
1122	if (error)
1123		goto out_unlock;
1124
1125	switch (mode & FALLOC_FL_MODE_MASK) {
1126	case FALLOC_FL_PUNCH_HOLE:
1127		error = xfs_free_file_space(ip, offset, len);
1128		break;
1129	case FALLOC_FL_COLLAPSE_RANGE:
1130		error = xfs_falloc_collapse_range(file, offset, len);
1131		break;
1132	case FALLOC_FL_INSERT_RANGE:
1133		error = xfs_falloc_insert_range(file, offset, len);
1134		break;
1135	case FALLOC_FL_ZERO_RANGE:
1136		error = xfs_falloc_zero_range(file, mode, offset, len);
1137		break;
1138	case FALLOC_FL_UNSHARE_RANGE:
1139		error = xfs_falloc_unshare_range(file, mode, offset, len);
1140		break;
1141	case FALLOC_FL_ALLOCATE_RANGE:
1142		error = xfs_falloc_allocate_range(file, mode, offset, len);
1143		break;
1144	default:
1145		error = -EOPNOTSUPP;
1146		break;
1147	}
1148
1149	if (!error && xfs_file_sync_writes(file))
1150		error = xfs_log_force_inode(ip);
1151
1152out_unlock:
1153	xfs_iunlock(ip, iolock);
1154	return error;
1155}
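
/*
 * Illustrative userspace sketch, not part of the original source: hole
 * punching through the FALLOC_FL_PUNCH_HOLE case above.  The VFS
 * requires PUNCH_HOLE to be combined with KEEP_SIZE; the punched range
 * reads back as zeroes afterwards.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int example_punch_hole(int fd, off_t off, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 off, len);
}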
1156
1157STATIC int
1158xfs_file_fadvise(
1159	struct file	*file,
1160	loff_t		start,
1161	loff_t		end,
1162	int		advice)
1163{
1164	struct xfs_inode *ip = XFS_I(file_inode(file));
1165	int ret;
1166	int lockflags = 0;
1167
1168	/*
1169	 * Operations creating pages in page cache need protection from hole
1170	 * punching and similar ops
1171	 */
1172	if (advice == POSIX_FADV_WILLNEED) {
1173		lockflags = XFS_IOLOCK_SHARED;
1174		xfs_ilock(ip, lockflags);
1175	}
1176	ret = generic_fadvise(file, start, end, advice);
1177	if (lockflags)
1178		xfs_iunlock(ip, lockflags);
1179	return ret;
1180}
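
/*
 * Illustrative userspace sketch, not part of the original source: a
 * readahead hint that takes the WILLNEED path above, so page cache
 * population cannot race with a concurrent hole punch.
 */
#include <fcntl.h>

int example_willneed(int fd, off_t off, off_t len)
{
	/* Returns 0 on success or an errno value on failure. */
	return posix_fadvise(fd, off, len, POSIX_FADV_WILLNEED);
}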
1181
1182STATIC loff_t
1183xfs_file_remap_range(
1184	struct file		*file_in,
1185	loff_t			pos_in,
1186	struct file		*file_out,
1187	loff_t			pos_out,
1188	loff_t			len,
1189	unsigned int		remap_flags)
1190{
1191	struct inode		*inode_in = file_inode(file_in);
1192	struct xfs_inode	*src = XFS_I(inode_in);
1193	struct inode		*inode_out = file_inode(file_out);
1194	struct xfs_inode	*dest = XFS_I(inode_out);
1195	struct xfs_mount	*mp = src->i_mount;
1196	loff_t			remapped = 0;
1197	xfs_extlen_t		cowextsize;
1198	int			ret;
1199
1200	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1201		return -EINVAL;
1202
1203	if (!xfs_has_reflink(mp))
1204		return -EOPNOTSUPP;
1205
1206	if (xfs_is_shutdown(mp))
1207		return -EIO;
1208
1209	/* Prepare and then clone file data. */
1210	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1211			&len, remap_flags);
1212	if (ret || len == 0)
1213		return ret;
1214
1215	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1216
1217	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1218			&remapped);
1219	if (ret)
1220		goto out_unlock;
1221
1222	/*
1223	 * Carry the cowextsize hint from src to dest if we're sharing the
1224	 * entire source file to the entire destination file, the source file
1225	 * has a cowextsize hint, and the destination file does not.
1226	 */
1227	cowextsize = 0;
1228	if (pos_in == 0 && len == i_size_read(inode_in) &&
1229	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1230	    pos_out == 0 && len >= i_size_read(inode_out) &&
1231	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1232		cowextsize = src->i_cowextsize;
1233
1234	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1235			remap_flags);
1236	if (ret)
1237		goto out_unlock;
1238
1239	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1240		xfs_log_force_inode(dest);
1241out_unlock:
1242	xfs_iunlock2_remapping(src, dest);
1243	if (ret)
1244		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1245	/*
1246	 * If the caller did not set CAN_SHORTEN, then it is not prepared to
1247	 * handle partial results -- either the whole remap succeeds, or we
1248	 * must say why it did not.  In this case, any error should be returned
1249	 * to the caller.
1250	 */
1251	if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
1252		return ret;
1253	return remapped > 0 ? remapped : ret;
1254}
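
/*
 * Illustrative userspace sketch, not part of the original source: a
 * whole-file reflink clone, which reaches xfs_file_remap_range() via the
 * FICLONE ioctl.  Without reflink support this fails with EOPNOTSUPP.
 */
#include <linux/fs.h>
#include <sys/ioctl.h>

int example_clone(int dest_fd, int src_fd)
{
	return ioctl(dest_fd, FICLONE, src_fd);
}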
1255
1256STATIC int
1257xfs_file_open(
1258	struct inode	*inode,
1259	struct file	*file)
1260{
1261	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1262		return -EIO;
1263	file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
1264	if (xfs_inode_can_atomicwrite(XFS_I(inode)))
1265		file->f_mode |= FMODE_CAN_ATOMIC_WRITE;
1266	return generic_file_open(inode, file);
1267}
1268
1269STATIC int
1270xfs_dir_open(
1271	struct inode	*inode,
1272	struct file	*file)
1273{
1274	struct xfs_inode *ip = XFS_I(inode);
1275	unsigned int	mode;
1276	int		error;
1277
1278	if (xfs_is_shutdown(ip->i_mount))
1279		return -EIO;
1280	error = generic_file_open(inode, file);
1281	if (error)
1282		return error;
1283
1284	/*
1285	 * If there are any blocks, read-ahead block 0 as we're almost
1286	 * certain to have the next operation be a read there.
1287	 */
1288	mode = xfs_ilock_data_map_shared(ip);
1289	if (ip->i_df.if_nextents > 0)
1290		error = xfs_dir3_data_readahead(ip, 0, 0);
1291	xfs_iunlock(ip, mode);
1292	return error;
1293}
1294
1295/*
1296 * Don't bother propagating errors.  We're just doing cleanup, and the caller
1297 * ignores the return value anyway.
1298 */
1299STATIC int
1300xfs_file_release(
1301	struct inode		*inode,
1302	struct file		*file)
1303{
1304	struct xfs_inode	*ip = XFS_I(inode);
1305	struct xfs_mount	*mp = ip->i_mount;
1306
1307	/*
1308	 * If this is a read-only mount or the file system has been shut down,
1309	 * don't generate I/O.
1310	 */
1311	if (xfs_is_readonly(mp) || xfs_is_shutdown(mp))
1312		return 0;
1313
1314	/*
1315	 * If we previously truncated this file and removed old data in the
1316	 * process, we want to initiate "early" writeout on the last close.
1317	 * This is an attempt to combat the notorious NULL files problem which
1318	 * is particularly noticeable from a truncate down, buffered (re-)write
1319	 * (delalloc), followed by a crash.  What we are effectively doing here
1320	 * is significantly reducing the time window where we'd otherwise be
1321	 * exposed to that problem.
1322	 */
1323	if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
1324		xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED);
1325		if (ip->i_delayed_blks > 0)
1326			filemap_flush(inode->i_mapping);
1327	}
1328
1329	/*
1330	 * XFS aggressively preallocates post-EOF space to generate contiguous
1331	 * allocations for writers that append to the end of the file.
1332	 *
1333	 * To support workloads that close and reopen the file frequently, these
1334	 * preallocations usually persist after a close unless it is the first
1335	 * close for the inode.  This is a tradeoff to generate tightly packed
1336	 * data layouts for unpacking tarballs or similar archives that write
1337	 * one file after another without going back to it while keeping the
1338	 * preallocation for files that have recurring open/write/close cycles.
1339	 *
1340	 * This heuristic is skipped for inodes with the append-only flag as
1341	 * that flag is rather pointless for inodes written only once.
1342	 *
1343	 * There is no point in freeing blocks here for open but unlinked files
1344	 * as they will be taken care of by the inactivation path soon.
1345	 *
1346	 * When releasing a read-only context, don't flush data or trim post-EOF
1347	 * blocks.  This prevents open/read/close workloads from removing EOF
1348	 * blocks that other writers depend upon to reduce fragmentation.
1349	 *
1350	 * If we can't get the iolock just skip truncating the blocks past EOF
1351	 * because we could deadlock with the mmap_lock otherwise. We'll get
1352	 * another chance to drop them once the last reference to the inode is
1353	 * dropped, so we'll never leak blocks permanently.
1354	 */
1355	if (inode->i_nlink &&
1356	    (file->f_mode & FMODE_WRITE) &&
1357	    !(ip->i_diflags & XFS_DIFLAG_APPEND) &&
1358	    !xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
1359	    xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1360		if (xfs_can_free_eofblocks(ip) &&
1361		    !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
1362			xfs_free_eofblocks(ip);
1363		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1364	}
1365
1366	return 0;
1367}
1368
1369STATIC int
1370xfs_file_readdir(
1371	struct file	*file,
1372	struct dir_context *ctx)
1373{
1374	struct inode	*inode = file_inode(file);
1375	xfs_inode_t	*ip = XFS_I(inode);
1376	size_t		bufsize;
1377
1378	/*
1379	 * The Linux API doesn't pass the total size of the buffer we
1380	 * read into down to the filesystem.  With the filldir concept
1381	 * it's not needed for correct information, but the XFS dir2 leaf
1382	 * code wants an estimate of the buffer size to calculate its
1383	 * readahead window and to size the buffers used for mapping to
1384	 * physical blocks.
1385	 *
1386	 * Try to give it an estimate that's good enough; maybe at some
1387	 * point we can change the ->readdir prototype to include the
1388	 * buffer size.  For now we use the current glibc buffer size.
1389	 */
1390	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1391
1392	return xfs_readdir(NULL, ip, ctx, bufsize);
1393}
1394
1395STATIC loff_t
1396xfs_file_llseek(
1397	struct file	*file,
1398	loff_t		offset,
1399	int		whence)
1400{
1401	struct inode		*inode = file->f_mapping->host;
1402
1403	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1404		return -EIO;
1405
1406	switch (whence) {
1407	default:
1408		return generic_file_llseek(file, offset, whence);
1409	case SEEK_HOLE:
1410		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1411		break;
1412	case SEEK_DATA:
1413		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1414		break;
1415	}
1416
1417	if (offset < 0)
1418		return offset;
1419	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1420}
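
/*
 * Illustrative userspace sketch, not part of the original source: walking
 * a file's data extents with the SEEK_DATA/SEEK_HOLE support above.  The
 * loop ends with ENXIO once no further data exists before EOF.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

void example_map_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
}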
1421
1422static inline vm_fault_t
1423xfs_dax_fault_locked(
1424	struct vm_fault		*vmf,
1425	unsigned int		order,
1426	bool			write_fault)
1427{
1428	vm_fault_t		ret;
1429	pfn_t			pfn;
1430
1431	if (!IS_ENABLED(CONFIG_FS_DAX)) {
1432		ASSERT(0);
1433		return VM_FAULT_SIGBUS;
1434	}
1435	ret = dax_iomap_fault(vmf, order, &pfn, NULL,
1436			(write_fault && !vmf->cow_page) ?
1437				&xfs_dax_write_iomap_ops :
1438				&xfs_read_iomap_ops);
1439	if (ret & VM_FAULT_NEEDDSYNC)
1440		ret = dax_finish_sync_fault(vmf, order, pfn);
1441	return ret;
1442}
1443
1444static vm_fault_t
1445xfs_dax_read_fault(
1446	struct vm_fault		*vmf,
1447	unsigned int		order)
1448{
1449	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
1450	vm_fault_t		ret;
1451
1452	trace_xfs_read_fault(ip, order);
1453
1454	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1455	ret = xfs_dax_fault_locked(vmf, order, false);
1456	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1457
1458	return ret;
1459}
1460
1461/*
1462 * Locking for serialisation of IO during page faults. This results in a lock
1463 * ordering of:
1464 *
1465 * mmap_lock (MM)
1466 *   sb_start_pagefault(vfs, freeze)
1467 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1468 *       page_lock (MM)
1469 *         i_lock (XFS - extent map serialisation)
1470 */
1471static vm_fault_t
1472xfs_write_fault(
1473	struct vm_fault		*vmf,
1474	unsigned int		order)
1475{
1476	struct inode		*inode = file_inode(vmf->vma->vm_file);
1477	struct xfs_inode	*ip = XFS_I(inode);
1478	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
1479	vm_fault_t		ret;
1480
1481	trace_xfs_write_fault(ip, order);
1482
1483	sb_start_pagefault(inode->i_sb);
1484	file_update_time(vmf->vma->vm_file);
1485
1486	/*
1487	 * Normally we only need the shared mmaplock, but if a reflink remap is
1488	 * in progress we take the exclusive lock to wait for the remap to
1489	 * finish before taking a write fault.
1490	 */
1491	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1492	if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
1493		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1494		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1495		lock_mode = XFS_MMAPLOCK_EXCL;
1496	}
1497
1498	if (IS_DAX(inode))
1499		ret = xfs_dax_fault_locked(vmf, order, true);
1500	else
1501		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
1502	xfs_iunlock(ip, lock_mode);
1503
1504	sb_end_pagefault(inode->i_sb);
1505	return ret;
1506}
1507
1508static inline bool
1509xfs_is_write_fault(
1510	struct vm_fault		*vmf)
1511{
1512	return (vmf->flags & FAULT_FLAG_WRITE) &&
1513	       (vmf->vma->vm_flags & VM_SHARED);
1514}
1515
1516static vm_fault_t
1517xfs_filemap_fault(
1518	struct vm_fault		*vmf)
1519{
1520	struct inode		*inode = file_inode(vmf->vma->vm_file);
1521
1522	/* DAX can shortcut the normal fault path on write faults! */
1523	if (IS_DAX(inode)) {
1524		if (xfs_is_write_fault(vmf))
1525			return xfs_write_fault(vmf, 0);
1526		return xfs_dax_read_fault(vmf, 0);
1527	}
1528
1529	trace_xfs_read_fault(XFS_I(inode), 0);
1530	return filemap_fault(vmf);
1531}
1532
1533static vm_fault_t
1534xfs_filemap_huge_fault(
1535	struct vm_fault		*vmf,
1536	unsigned int		order)
1537{
1538	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1539		return VM_FAULT_FALLBACK;
1540
1541	/* DAX can shortcut the normal fault path on write faults! */
1542	if (xfs_is_write_fault(vmf))
1543		return xfs_write_fault(vmf, order);
1544	return xfs_dax_read_fault(vmf, order);
1545}
1546
1547static vm_fault_t
1548xfs_filemap_page_mkwrite(
1549	struct vm_fault		*vmf)
1550{
1551	return xfs_write_fault(vmf, 0);
1552}
1553
1554/*
1555 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1556 * on write faults. In reality, it needs to serialise against truncate and
1557 * prepare memory for writing, so handle it as a standard write fault.
1558 */
1559static vm_fault_t
1560xfs_filemap_pfn_mkwrite(
1561	struct vm_fault		*vmf)
1562{
1563	return xfs_write_fault(vmf, 0);
1564}
1565
1566static const struct vm_operations_struct xfs_file_vm_ops = {
1567	.fault		= xfs_filemap_fault,
1568	.huge_fault	= xfs_filemap_huge_fault,
1569	.map_pages	= filemap_map_pages,
1570	.page_mkwrite	= xfs_filemap_page_mkwrite,
1571	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1572};
1573
1574STATIC int
1575xfs_file_mmap(
1576	struct file		*file,
1577	struct vm_area_struct	*vma)
1578{
1579	struct inode		*inode = file_inode(file);
1580	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1581
1582	/*
1583	 * We don't support synchronous mappings for non-DAX files, nor for
1584	 * DAX files whose underlying dax_device is not synchronous.
1585	 */
1586	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1587		return -EOPNOTSUPP;
1588
1589	file_accessed(file);
1590	vma->vm_ops = &xfs_file_vm_ops;
1591	if (IS_DAX(inode))
1592		vm_flags_set(vma, VM_HUGEPAGE);
1593	return 0;
1594}
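
/*
 * Illustrative userspace sketch, not part of the original source: a
 * synchronous DAX mapping.  MAP_SYNC is only valid together with
 * MAP_SHARED_VALIDATE and is refused via daxdev_mapping_supported()
 * above unless the backing device is synchronous.  Older libcs may need
 * <linux/mman.h> for the MAP_SYNC definition.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

void *example_map_sync(int fd, size_t len)
{
	/* Returns MAP_FAILED with EOPNOTSUPP on unsupported setups. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}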
1595
1596const struct file_operations xfs_file_operations = {
1597	.llseek		= xfs_file_llseek,
1598	.read_iter	= xfs_file_read_iter,
1599	.write_iter	= xfs_file_write_iter,
1600	.splice_read	= xfs_file_splice_read,
1601	.splice_write	= iter_file_splice_write,
1602	.iopoll		= iocb_bio_iopoll,
1603	.unlocked_ioctl	= xfs_file_ioctl,
1604#ifdef CONFIG_COMPAT
1605	.compat_ioctl	= xfs_file_compat_ioctl,
1606#endif
1607	.mmap		= xfs_file_mmap,
1608	.open		= xfs_file_open,
1609	.release	= xfs_file_release,
1610	.fsync		= xfs_file_fsync,
1611	.get_unmapped_area = thp_get_unmapped_area,
1612	.fallocate	= xfs_file_fallocate,
1613	.fadvise	= xfs_file_fadvise,
1614	.remap_file_range = xfs_file_remap_range,
1615	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
1616			  FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE,
1617};
1618
1619const struct file_operations xfs_dir_file_operations = {
1620	.open		= xfs_dir_open,
1621	.read		= generic_read_dir,
1622	.iterate_shared	= xfs_file_readdir,
1623	.llseek		= generic_file_llseek,
1624	.unlocked_ioctl	= xfs_file_ioctl,
1625#ifdef CONFIG_COMPAT
1626	.compat_ioctl	= xfs_file_compat_ioctl,
1627#endif
1628	.fsync		= xfs_dir_fsync,
1629};