Linux v4.10.11: fs/xfs/xfs_file.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Clear the specified ranges to zero through either the pagecache or DAX.
 * Holes and unwritten extents will be left as-is as they already are zeroed.
 */
int
xfs_zero_range(
	struct xfs_inode	*ip,
	xfs_off_t		pos,
	xfs_off_t		count,
	bool			*did_zero)
{
	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
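
/*
 * Editor's note (a sketch, not part of the original source): the function
 * above follows the usual XFS transaction lifecycle -- xfs_trans_alloc()
 * reserves log space, xfs_trans_ijoin() attaches the locked inode so that
 * xfs_trans_commit() both logs the change and releases XFS_ILOCK_EXCL.
 * A hypothetical caller setting the prealloc flag synchronously:
 *
 *	error = xfs_update_prealloc_flags(ip,
 *			XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
 *	if (error)
 *		return error;
 */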

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
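
/*
 * Editor's note (a summary, not part of the original source): the ordering
 * above is data -> data-device cache -> log.  filemap_write_and_wait_range()
 * pushes dirty pagecache, the first cache flush makes that data stable, the
 * log force makes the metadata that references it stable, and the final
 * flush covers the fdatasync case where nothing was logged at all.
 */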

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
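
/*
 * Editor's note (a sketch, not part of the original source): all three read
 * paths take XFS_IOLOCK_SHARED, so reads run concurrently with each other
 * while still excluding operations (truncate, reflink remap) that take the
 * iolock exclusively.  The dispatch order matters: IS_DAX() is checked
 * before IOCB_DIRECT because DAX I/O always goes straight to the backing
 * persistent memory, regardless of whether O_DIRECT was requested.
 */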

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}
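
/*
 * Editor's note (a worked example, not part of the original source): with
 * i_size = 1000 and a write starting at offset 5000, xfs_zero_eof() is
 * called with isize = 1000 and offset = 5000 and zeroes bytes 1000..4999,
 * so a later read of that gap sees zeroes rather than stale block contents.
 */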

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
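
/*
 * Editor's note (a summary, not part of the original source): the restart
 * loop above is bounded because each retry only strengthens state that is
 * never undone on this path -- the iolock is upgraded to exclusive at most
 * once and drained_dio flips to true at most once, so a later pass can no
 * longer reach either goto and falls through to xfs_zero_eof().
 */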

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	bool			update_size = false;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		update_size = true;
	}
	spin_unlock(&ip->i_flags_lock);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	if (flags & IOMAP_DIO_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size);
	else if (update_size)
		error = xfs_setfilesize(ip, offset, size);

	return error;
}
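
/*
 * Editor's note (an illustration, not part of the original source): the EOF
 * race the comment above guards against is two AIO completions finishing
 * out of order.  If completion A covers 0..1M and completion B covers
 * 1M..2M, an unlocked read-then-write of i_size could let A's "i_size = 1M"
 * land after B's "i_size = 2M", moving EOF backwards; taking i_flags_lock
 * makes the compare-and-update atomic.
 */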

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	/* If this is a block-aligned directio CoW, remap immediately. */
	if (xfs_is_reflink_inode(ip) && !unaligned_io) {
		ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
		if (ret)
			goto out;
	}

	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
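
/*
 * Editor's note (a worked example, not part of the original source):
 * assuming 4096 byte filesystem blocks, m_blockmask is 0xfff, so a 512 byte
 * direct write at offset 4096 sets unaligned_io (4096 + 512 ends mid-block)
 * and takes XFS_IOLOCK_EXCL, while an 8192 byte write at offset 4096 keeps
 * XFS_IOLOCK_SHARED and can run in parallel with other aligned writers.
 */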

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	xfs_ilock(ip, iolock);
	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
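
/*
 * Editor's note (a summary, not part of the original source): the retry
 * logic above cannot loop forever because the enospc flag is only armed
 * once.  A first -ENOSPC flushes dirty inodes, runs an eofblocks scan and
 * retries; if the retry still returns -ENOSPC, the armed flag lets the
 * error propagate.  The -EDQUOT branch similarly retries only while the
 * quota eofblocks/cowblocks scans actually freed something.
 */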

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
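
/*
 * Editor's note (a cross-reference, not part of the original source):
 * -EREMCHG is the private signal returned by xfs_file_dio_aio_write() above
 * when it refuses an unaligned direct write to a reflinked file; it never
 * escapes to userspace because this function immediately converts it into a
 * buffered write, which can unshare partial blocks through the pagecache.
 */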

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = 1;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated, so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
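
/*
 * Editor's note (a worked example, not part of the original source): on a
 * filesystem with 4096 byte blocks, collapsing 8192 bytes at offset 4096 of
 * a 20480 byte file passes the alignment and EOF checks, shifts the extents
 * at 12288..20480 down to offset 4096, and then trims i_size to 12288 via
 * the ATTR_SIZE setattr above.
 */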

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	int		error;

	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}
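
/*
 * Editor's note (a summary, not part of the original source): the scan
 * above walks the circular buffer_head list of a single page, advancing
 * lastoff by bh->b_size per buffer.  With 4096 byte pages and 512 byte
 * blocks, it classifies each of the eight sub-page buffers independently
 * as data (uptodate or unwritten) or hole (neither) and reports the start
 * of the first match.
 */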

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * for holes and this is the first pass through the loop, the
		 * given offset landed in a hole; return it.
		 *
		 * If we have already stepped through some block buffers
		 * looking for holes but they all contained data, the last
		 * offset already points to the end of the last mapped page;
		 * if it does not reach the endpoint of the search, there
		 * should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least one page was found.  If this is the first pass
		 * through the loop and the first page's offset is greater
		 * than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset does not reach the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point of the search if this is the first
				 * time we come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data but found nothing,
			 * or searching for a hole but found a data buffer.  In
			 * either case, the next page probably contains what we
			 * want, so update the last offset to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  Nothing was found when searching for data, but we
		 * found a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * The caller must lock the inode with xfs_ilock_data_map_shared(); can we
 * craft an appropriate ASSERT?
 *
 * @end exists because the VFS-level lseek interface is defined such that any
 * offset past i_size shall return -ENXIO, but we use this for quota code
 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}
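
/*
 * Editor's note (a worked example, not part of the original source): for
 * lseek(fd, 0, SEEK_HOLE) on a file whose single written extent covers the
 * whole file, xfs_bmapi_read() returns only that one mapping, so the
 * nmap == 1 branch fires and the function reports the implicit hole at EOF
 * by setting offset = end (i.e. i_size).
 */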

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	} else {
		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
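
/*
 * Editor's note (a summary, not part of the original source): per the lock
 * ordering comment above, every fault handler in this file takes
 * XFS_MMAPLOCK_SHARED only after sb_start_pagefault() and never touches the
 * iolock, which is what lets truncate (which takes XFS_MMAPLOCK_EXCL) and
 * filesystem freeze safely wait out in-flight page faults.
 */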

STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode))
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. Hence we need to handle both cases. There is no
 * ->pmd_mkwrite callout for huge pages, so we have a single function here to
 * handle both cases. @flags carries the information on the type of fault
 * occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{

	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;

}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
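
/*
 * Editor's note (a sketch, not part of the original source): VM_MIXEDMAP
 * tells the mm layer this VMA may map raw pfns without struct page backing,
 * which DAX requires, and VM_HUGEPAGE opts the mapping into the PMD-sized
 * faults served by xfs_filemap_pmd_fault() above.
 */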

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};
Linux v6.13.7: fs/xfs/xfs_file.c (excerpt)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_file.h"

#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	if (!is_power_of_2(alloc_unit))
		return isaligned_64(pos, alloc_unit) &&
		       isaligned_64(len, alloc_unit);

	return !((pos | len) & (alloc_unit - 1));
}
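
/*
 * Editor's note (a worked example, not part of the original source): with a
 * 4096 byte allocation unit, pos = 8192 and len = 4096 are aligned because
 * (8192 | 4096) & 4095 == 0.  The is_power_of_2() test exists because
 * realtime files can have allocation units that are not powers of two,
 * where a simple mask test would give the wrong answer.
 */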

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error, err2;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require syncing previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}

static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

static int
xfs_ilock_iocb_for_write(
	struct kiocb		*iocb,
	unsigned int		*lock_mode)
{
	ssize_t			ret;
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	ret = xfs_ilock_iocb(iocb, *lock_mode);
	if (ret)
		return ret;

	/*
	 * If a reflink remap is in progress we always need to take the iolock
	 * exclusively to wait for it to finish.
	 */
	if (*lock_mode == XFS_IOLOCK_SHARED &&
	    xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, *lock_mode);
		*lock_mode = XFS_IOLOCK_EXCL;
		return xfs_ilock_iocb(iocb, *lock_mode);
	}

	return 0;
}
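
/*
 * Editor's note (a comparison, not part of the original source): relative
 * to the v4.10 listing above, the lock helpers here add IOCB_NOWAIT
 * handling (returning -EAGAIN instead of sleeping, for RWF_NOWAIT and
 * io_uring style callers), and LSNs have been replaced by checkpoint
 * sequence numbers (xfs_csn_t) for deciding how far to force the log.
 */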
 229
 230STATIC ssize_t
 231xfs_file_dio_read(
 232	struct kiocb		*iocb,
 233	struct iov_iter		*to)
 234{
 235	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 
 236	ssize_t			ret;
 237
 238	trace_xfs_file_direct_read(iocb, to);
 239
 240	if (!iov_iter_count(to))
 241		return 0; /* skip atime */
 242
 243	file_accessed(iocb->ki_filp);
 244
 245	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 246	if (ret)
 247		return ret;
 248	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
 249	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 250
 251	return ret;
 252}
 253
 254static noinline ssize_t
 255xfs_file_dax_read(
 256	struct kiocb		*iocb,
 257	struct iov_iter		*to)
 258{
 259	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 
 260	ssize_t			ret = 0;
 261
 262	trace_xfs_file_dax_read(iocb, to);
 263
 264	if (!iov_iter_count(to))
 265		return 0; /* skip atime */
 266
 267	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 268	if (ret)
 269		return ret;
 270	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
 271	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 272
 273	file_accessed(iocb->ki_filp);
 274	return ret;
 275}
 276
 277STATIC ssize_t
 278xfs_file_buffered_read(
 279	struct kiocb		*iocb,
 280	struct iov_iter		*to)
 281{
 282	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 283	ssize_t			ret;
 284
 285	trace_xfs_file_buffered_read(iocb, to);
 286
 287	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 288	if (ret)
 289		return ret;
 290	ret = generic_file_read_iter(iocb, to);
 291	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 292
 293	return ret;
 294}
 295
 296STATIC ssize_t
 297xfs_file_read_iter(
 298	struct kiocb		*iocb,
 299	struct iov_iter		*to)
 300{
 301	struct inode		*inode = file_inode(iocb->ki_filp);
 302	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
 303	ssize_t			ret = 0;
 304
 305	XFS_STATS_INC(mp, xs_read_calls);
 306
 307	if (xfs_is_shutdown(mp))
 308		return -EIO;
 309
 310	if (IS_DAX(inode))
 311		ret = xfs_file_dax_read(iocb, to);
 312	else if (iocb->ki_flags & IOCB_DIRECT)
 313		ret = xfs_file_dio_read(iocb, to);
 314	else
 315		ret = xfs_file_buffered_read(iocb, to);
 316
 317	if (ret > 0)
 318		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 319	return ret;
 320}
 321
 322STATIC ssize_t
 323xfs_file_splice_read(
 324	struct file		*in,
 325	loff_t			*ppos,
 326	struct pipe_inode_info	*pipe,
 327	size_t			len,
 328	unsigned int		flags)
 329{
 330	struct inode		*inode = file_inode(in);
 331	struct xfs_inode	*ip = XFS_I(inode);
 332	struct xfs_mount	*mp = ip->i_mount;
 333	ssize_t			ret = 0;
 334
 335	XFS_STATS_INC(mp, xs_read_calls);
 336
 337	if (xfs_is_shutdown(mp))
 338		return -EIO;
 339
 340	trace_xfs_file_splice_read(ip, *ppos, len);
 341
 342	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 343	ret = filemap_splice_read(in, ppos, pipe, len, flags);
 344	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 345	if (ret > 0)
 346		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 347	return ret;
 348}
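
/*
 * Illustrative sketch (editorial addition): xfs_file_splice_read() above
 * backs splice(2) when the source is a regular file.  A hypothetical
 * userspace helper moves file data into a pipe without bouncing it through
 * a user buffer:
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <sys/types.h>
#include <fcntl.h>

ssize_t splice_to_pipe(int file_fd, int pipe_wr_fd, size_t len)
{
	loff_t off = 0;

	/* the kernel fills the pipe straight from the page cache */
	return splice(file_fd, &off, pipe_wr_fd, NULL, len, 0);
}
#endif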
 349
 350/*
 351 * Take care of zeroing post-EOF blocks when they might exist.
 352 *
 353 * Returns 0 if successful, a negative error for a failure, or 1 if this
 354 * function dropped the iolock and reacquired it exclusively, in which case
 355 * the caller needs to restart the write sanity checks.
 356 */
 357static ssize_t
 358xfs_file_write_zero_eof(
 359	struct kiocb		*iocb,
 360	struct iov_iter		*from,
 361	unsigned int		*iolock,
 362	size_t			count,
 363	bool			*drained_dio)
 364{
 365	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 366	loff_t			isize;
 367	int			error;
 368
 369	/*
 370	 * We need to serialise against EOF updates that occur in IO completions
 371	 * here. We want to make sure that nobody is changing the size while
 372	 * we do this check until we have placed an IO barrier (i.e. hold
 373	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
 374	 * spinlock effectively forms a memory barrier once we have
 375	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
 376	 * hence be able to correctly determine if we need to run zeroing.
 377	 */
 378	spin_lock(&ip->i_flags_lock);
 379	isize = i_size_read(VFS_I(ip));
 380	if (iocb->ki_pos <= isize) {
 381		spin_unlock(&ip->i_flags_lock);
 382		return 0;
 383	}
 384	spin_unlock(&ip->i_flags_lock);
 385
 386	if (iocb->ki_flags & IOCB_NOWAIT)
 387		return -EAGAIN;
 388
 389	if (!*drained_dio) {
 390		/*
 391		 * If zeroing is needed and we are currently holding the iolock
 392		 * shared, we need to upgrade it to exclusive, which implies
 393		 * having to redo all of the earlier checks.
 394		 */
 395		if (*iolock == XFS_IOLOCK_SHARED) {
 396			xfs_iunlock(ip, *iolock);
 397			*iolock = XFS_IOLOCK_EXCL;
 398			xfs_ilock(ip, *iolock);
 399			iov_iter_reexpand(from, count);
 400		}
 401
 402		/*
 403		 * We now have an IO submission barrier in place, but AIO can do
 404		 * EOF updates during IO completion and hence we now need to
 405		 * wait for all of them to drain.  Non-AIO DIO will have drained
 406		 * before we are given the XFS_IOLOCK_EXCL, and so for most
 407		 * cases this wait is a no-op.
 408		 */
 409		inode_dio_wait(VFS_I(ip));
 410		*drained_dio = true;
 411		return 1;
 412	}
 413
 414	trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
 415
 416	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 417	error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
 418	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 419
 420	return error;
 421}
 422
 423/*
 424 * Common pre-write limit and setup checks.
 425 *
 426 * Called with the iolock held either shared or exclusive according to
 427 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 428 * if called for a direct write beyond i_size.
 429 */
 430STATIC ssize_t
 431xfs_file_write_checks(
 432	struct kiocb		*iocb,
 433	struct iov_iter		*from,
 434	unsigned int		*iolock)
 435{
 436	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 437	size_t			count = iov_iter_count(from);
 438	bool			drained_dio = false;
 439	ssize_t			error;
 440
 441restart:
 442	error = generic_write_checks(iocb, from);
 443	if (error <= 0)
 444		return error;
 445
 446	if (iocb->ki_flags & IOCB_NOWAIT) {
 447		error = break_layout(inode, false);
 448		if (error == -EWOULDBLOCK)
 449			error = -EAGAIN;
 450	} else {
 451		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
 452	}
 453
 454	if (error)
 455		return error;
 456
 457	/*
 458	 * For changing security info in file_remove_privs() we need i_rwsem
 459	 * exclusively.
 460	 */
 461	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 462		xfs_iunlock(XFS_I(inode), *iolock);
 463		*iolock = XFS_IOLOCK_EXCL;
 464		error = xfs_ilock_iocb(iocb, *iolock);
 465		if (error) {
 466			*iolock = 0;
 467			return error;
 468		}
 469		goto restart;
 470	}
 471
 472	/*
 473	 * If the offset is beyond the size of the file, we need to zero all
 474	 * blocks that fall between the existing EOF and the start of this
 475	 * write.
 476	 *
 477	 * We can do an unlocked check for i_size here safely as I/O completion
 478	 * can only extend EOF.  Truncate is locked out at this point, so the
 479	 * EOF cannot move backwards, only forwards. Hence we only need to take
 480	 * the slow path when we are at or beyond the current EOF.
 481	 */
 482	if (iocb->ki_pos > i_size_read(inode)) {
 483		error = xfs_file_write_zero_eof(iocb, from, iolock, count,
 484				&drained_dio);
 485		if (error == 1)
 486			goto restart;
 487		if (error)
 488			return error;
 489	}
 490
 491	return kiocb_modified(iocb);
 492}
 493
 494static int
 495xfs_dio_write_end_io(
 496	struct kiocb		*iocb,
 497	ssize_t			size,
 498	int			error,
 499	unsigned		flags)
 500{
 501	struct inode		*inode = file_inode(iocb->ki_filp);
 502	struct xfs_inode	*ip = XFS_I(inode);
 503	loff_t			offset = iocb->ki_pos;
 504	unsigned int		nofs_flag;
 505
 506	trace_xfs_end_io_direct_write(ip, offset, size);
 507
 508	if (xfs_is_shutdown(ip->i_mount))
 509		return -EIO;
 510
 511	if (error)
 512		return error;
 513	if (!size)
 514		return 0;
 515
 516	/*
 517	 * Capture amount written on completion as we can't reliably account
 518	 * for it on submission.
 519	 */
 520	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
 521
 522	/*
 523	 * We can allocate memory here while doing writeback on behalf of
 524	 * memory reclaim.  To avoid memory allocation deadlocks set the
 525	 * task-wide nofs context for the following operations.
 526	 */
 527	nofs_flag = memalloc_nofs_save();
 528
 529	if (flags & IOMAP_DIO_COW) {
 530		error = xfs_reflink_end_cow(ip, offset, size);
 531		if (error)
 532			goto out;
 533	}
 534
 535	/*
 536	 * Unwritten conversion updates the in-core isize after extent
 537	 * conversion but before updating the on-disk size. Updating isize any
 538	 * earlier allows a racing dio read to find unwritten extents before
 539	 * they are converted.
 540	 */
 541	if (flags & IOMAP_DIO_UNWRITTEN) {
 542		error = xfs_iomap_write_unwritten(ip, offset, size, true);
 543		goto out;
 544	}
 545
 546	/*
 547	 * We need to update the in-core inode size here so that we don't end up
 548	 * with the on-disk inode size being outside the in-core inode size. We
 549	 * have no other method of updating EOF for AIO, so always do it here
 550	 * if necessary.
 551	 *
 552	 * We need to lock the test/set EOF update as we can be racing with
 553	 * other IO completions here to update the EOF. Failing to serialise
 554	 * here can result in EOF moving backwards and Bad Things Happen when
 555	 * that occurs.
 556	 *
 557	 * As IO completion only ever extends EOF, we can do an unlocked check
 558	 * here to avoid taking the spinlock. If we land within the current EOF,
 559	 * then we do not need to do an extending update at all, and we don't
 560	 * need to take the lock to check this. If we race with an update moving
 561	 * EOF, then we'll either still be beyond EOF and need to take the lock,
 562	 * or we'll be within EOF and we don't need to take it at all.
 563	 */
 564	if (offset + size <= i_size_read(inode))
 565		goto out;
 566
 567	spin_lock(&ip->i_flags_lock);
 568	if (offset + size > i_size_read(inode)) {
 569		i_size_write(inode, offset + size);
 570		spin_unlock(&ip->i_flags_lock);
 571		error = xfs_setfilesize(ip, offset, size);
 572	} else {
 573		spin_unlock(&ip->i_flags_lock);
 574	}
 575
 576out:
 577	memalloc_nofs_restore(nofs_flag);
 578	return error;
 579}
 580
 581static const struct iomap_dio_ops xfs_dio_write_ops = {
 582	.end_io		= xfs_dio_write_end_io,
 583};
 584
 585/*
 586 * Handle block-aligned direct I/O writes
 587 */
 588static noinline ssize_t
 589xfs_file_dio_write_aligned(
 590	struct xfs_inode	*ip,
 591	struct kiocb		*iocb,
 592	struct iov_iter		*from)
 593{
 594	unsigned int		iolock = XFS_IOLOCK_SHARED;
 595	ssize_t			ret;
 596
 597	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 598	if (ret)
 599		return ret;
 600	ret = xfs_file_write_checks(iocb, from, &iolock);
 601	if (ret)
 602		goto out_unlock;
 603
 604	/*
 605	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
 606	 * the iolock back to shared if we had to take the exclusive lock in
 607	 * xfs_file_write_checks() for other reasons.
 608	 */
 609	if (iolock == XFS_IOLOCK_EXCL) {
 610		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 611		iolock = XFS_IOLOCK_SHARED;
 612	}
 613	trace_xfs_file_direct_write(iocb, from);
 614	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 615			   &xfs_dio_write_ops, 0, NULL, 0);
 616out_unlock:
 617	if (iolock)
 618		xfs_iunlock(ip, iolock);
 619	return ret;
 620}
 621
 622/*
 623 * Handle block-unaligned direct I/O writes
 624 *
 625 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 626 * them to be done in parallel with reads and other direct I/O writes.  However,
 627 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 628 * to do sub-block zeroing and that requires serialisation against other direct
 629 * I/O to the same block.  In this case we need to serialise the submission of
 630 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 631 * In the case where sub-block zeroing is not required, we can do concurrent
 632 * sub-block dios to the same block successfully.
 633 *
 634 * Optimistically submit the I/O using the shared lock first, but use the
 635 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 636 * if block allocation or partial block zeroing would be required.  In that case
 637 * we try again with the exclusive lock.
 638 */
 639static noinline ssize_t
 640xfs_file_dio_write_unaligned(
 641	struct xfs_inode	*ip,
 642	struct kiocb		*iocb,
 643	struct iov_iter		*from)
 644{
 645	size_t			isize = i_size_read(VFS_I(ip));
 646	size_t			count = iov_iter_count(from);
 647	unsigned int		iolock = XFS_IOLOCK_SHARED;
 648	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
 649	ssize_t			ret;
 650
 651	/*
 652	 * Extending writes need exclusivity because of the sub-block zeroing
 653	 * that the DIO code always does for partial tail blocks beyond EOF, so
 654	 * don't even bother trying the fast path in this case.
 655	 */
 656	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
 657		if (iocb->ki_flags & IOCB_NOWAIT)
 658			return -EAGAIN;
 659retry_exclusive:
 660		iolock = XFS_IOLOCK_EXCL;
 661		flags = IOMAP_DIO_FORCE_WAIT;
 662	}
 663
 664	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 665	if (ret)
 666		return ret;
 667
 668	/*
 669	 * We can't properly handle unaligned direct I/O to reflink files yet,
 670	 * as we can't unshare a partial block.
 671	 */
 672	if (xfs_is_cow_inode(ip)) {
 673		trace_xfs_reflink_bounce_dio_write(iocb, from);
 674		ret = -ENOTBLK;
 675		goto out_unlock;
 676	}
 677
 678	ret = xfs_file_write_checks(iocb, from, &iolock);
 679	if (ret)
 680		goto out_unlock;
 681
 682	/*
 683	 * If we are doing exclusive unaligned I/O, this must be the only I/O
 684	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
 685	 * conversions from the AIO end_io handler.  Wait for all other I/O to
 686	 * drain first.
 687	 */
 688	if (flags & IOMAP_DIO_FORCE_WAIT)
 689		inode_dio_wait(VFS_I(ip));
 690
 691	trace_xfs_file_direct_write(iocb, from);
 692	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 693			   &xfs_dio_write_ops, flags, NULL, 0);
 694
 695	/*
 696	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
 697	 * layer rejected it for mapping or locking reasons. If we are doing
 698	 * nonblocking user I/O, propagate the error.
 699	 */
 700	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
 701		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
 702		xfs_iunlock(ip, iolock);
 703		goto retry_exclusive;
 704	}
 705
 706out_unlock:
 707	if (iolock)
 708		xfs_iunlock(ip, iolock);
 709	return ret;
 710}
 711
 712static ssize_t
 713xfs_file_dio_write(
 714	struct kiocb		*iocb,
 715	struct iov_iter		*from)
 716{
 717	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 718	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
 719	size_t			count = iov_iter_count(from);
 720
 721	/* direct I/O must be aligned to device logical sector size */
 722	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 723		return -EINVAL;
 724	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
 725		return xfs_file_dio_write_unaligned(ip, iocb, from);
 726	return xfs_file_dio_write_aligned(ip, iocb, from);
 727}
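
/*
 * Illustrative sketch (editorial addition): the sector mask test above is
 * the -EINVAL that userspace O_DIRECT callers see for misaligned I/O.
 * Assuming 4096 bytes is a multiple of the device's logical sector size
 * (true for almost all devices), an aligned direct read might look like
 * this (names are hypothetical):
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

ssize_t dio_read_first_block(const char *path)
{
	void *buf = NULL;
	ssize_t ret = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	/* offset (0), length, and buffer address are all 4096-byte aligned */
	if (posix_memalign(&buf, 4096, 4096) == 0) {
		ret = pread(fd, buf, 4096, 0);
		free(buf);
	}
	close(fd);
	return ret;
}
#endif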
 728
 729static noinline ssize_t
 730xfs_file_dax_write(
 731	struct kiocb		*iocb,
 732	struct iov_iter		*from)
 733{
 734	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 735	struct xfs_inode	*ip = XFS_I(inode);
 736	unsigned int		iolock = XFS_IOLOCK_EXCL;
 737	ssize_t			ret, error = 0;
 738	loff_t			pos;
 739
 740	ret = xfs_ilock_iocb(iocb, iolock);
 741	if (ret)
 742		return ret;
 743	ret = xfs_file_write_checks(iocb, from, &iolock);
 744	if (ret)
 745		goto out;
 746
 747	pos = iocb->ki_pos;
 748
 749	trace_xfs_file_dax_write(iocb, from);
 750	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
 751	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 752		i_size_write(inode, iocb->ki_pos);
 753		error = xfs_setfilesize(ip, pos, ret);
 754	}
 755out:
 756	if (iolock)
 757		xfs_iunlock(ip, iolock);
 758	if (error)
 759		return error;
 760
 761	if (ret > 0) {
 762		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 763
 764		/* Handle various SYNC-type writes */
 765		ret = generic_write_sync(iocb, ret);
 766	}
 767	return ret;
 768}
 769
 770STATIC ssize_t
 771xfs_file_buffered_write(
 772	struct kiocb		*iocb,
 773	struct iov_iter		*from)
 774{
 775	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 776	struct xfs_inode	*ip = XFS_I(inode);
 777	ssize_t			ret;
 778	bool			cleared_space = false;
 779	unsigned int		iolock;
 780
 781write_retry:
 782	iolock = XFS_IOLOCK_EXCL;
 783	ret = xfs_ilock_iocb(iocb, iolock);
 784	if (ret)
 785		return ret;
 786
 787	ret = xfs_file_write_checks(iocb, from, &iolock);
 788	if (ret)
 789		goto out;
 790
 791	trace_xfs_file_buffered_write(iocb, from);
 792	ret = iomap_file_buffered_write(iocb, from,
 793			&xfs_buffered_write_iomap_ops, NULL);
 794
 795	/*
 796	 * If we hit a space limit, try to free up some lingering preallocated
 797	 * space before returning an error. In the case of ENOSPC, first try to
 798	 * write back all dirty inodes to free up some of the excess reserved
 799	 * metadata space. This reduces the chances that the eofblocks scan
 800	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 801	 * also behaves as a filter to prevent too many eofblocks scans from
 802	 * running at the same time.  Use a synchronous scan to increase the
 803	 * effectiveness of the scan.
 804	 */
 805	if (ret == -EDQUOT && !cleared_space) {
 806		xfs_iunlock(ip, iolock);
 807		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 808		cleared_space = true;
 809		goto write_retry;
 810	} else if (ret == -ENOSPC && !cleared_space) {
 811		struct xfs_icwalk	icw = {0};
 812
 813		cleared_space = true;
 814		xfs_flush_inodes(ip->i_mount);
 815
 816		xfs_iunlock(ip, iolock);
 817		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
 818		xfs_blockgc_free_space(ip->i_mount, &icw);
 819		goto write_retry;
 820	}
 821
 822out:
 823	if (iolock)
 824		xfs_iunlock(ip, iolock);
 825
 826	if (ret > 0) {
 827		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 828		/* Handle various SYNC-type writes */
 829		ret = generic_write_sync(iocb, ret);
 830	}
 831	return ret;
 832}
 833
 834STATIC ssize_t
 835xfs_file_write_iter(
 836	struct kiocb		*iocb,
 837	struct iov_iter		*from)
 838{
 839	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 840	struct xfs_inode	*ip = XFS_I(inode);
 841	ssize_t			ret;
 842	size_t			ocount = iov_iter_count(from);
 843
 844	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 845
 846	if (ocount == 0)
 847		return 0;
 848
 849	if (xfs_is_shutdown(ip->i_mount))
 850		return -EIO;
 851
 852	if (IS_DAX(inode))
 853		return xfs_file_dax_write(iocb, from);
 854
 855	if (iocb->ki_flags & IOCB_ATOMIC) {
 856		/*
 857		 * Currently only atomic writing of a single FS block is
 858		 * supported. It would be possible to atomically write smaller
 859		 * than a FS block, but there is no requirement to support this.
 860		 * Note that iomap also does not support this yet.
 861		 */
 862		if (ocount != ip->i_mount->m_sb.sb_blocksize)
 863			return -EINVAL;
 864		ret = generic_atomic_write_valid(iocb, from);
 865		if (ret)
 866			return ret;
 867	}
 868
 869	if (iocb->ki_flags & IOCB_DIRECT) {
 870		/*
 871		 * Allow a directio write to fall back to a buffered
 872		 * write *only* in the case that we're doing a reflink
 873		 * CoW.  In all other directio scenarios we do not
 874		 * allow an operation to fall back to buffered mode.
 875		 */
 876		ret = xfs_file_dio_write(iocb, from);
 877		if (ret != -ENOTBLK)
 878			return ret;
 879	}
 880
 881	return xfs_file_buffered_write(iocb, from);
 882}
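
/*
 * Illustrative sketch (editorial addition): the IOCB_ATOMIC branch above is
 * reached when userspace passes RWF_ATOMIC (Linux 6.11+) to pwritev2().
 * Assuming a libc that already defines the flag and a buffer of exactly one
 * filesystem block, an untorn write attempt might look like this:
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <sys/uio.h>

ssize_t atomic_block_write(int fd, void *buf, size_t blocksize, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = blocksize };

	/* all or nothing: the block is never observed partially written */
	return pwritev2(fd, &iov, 1, off, RWF_ATOMIC);
}
#endif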
 883
 884/* Does this file, inode, or mount want synchronous writes? */
 885static inline bool xfs_file_sync_writes(struct file *filp)
 886{
 887	struct xfs_inode	*ip = XFS_I(file_inode(filp));
 888
 889	if (xfs_has_wsync(ip->i_mount))
 890		return true;
 891	if (filp->f_flags & (__O_SYNC | O_DSYNC))
 892		return true;
 893	if (IS_SYNC(file_inode(filp)))
 894		return true;
 895
 896	return false;
 897}
 898
 899static int
 900xfs_falloc_newsize(
 901	struct file		*file,
 902	int			mode,
 903	loff_t			offset,
 904	loff_t			len,
 905	loff_t			*new_size)
 906{
 907	struct inode		*inode = file_inode(file);
 908
 909	if ((mode & FALLOC_FL_KEEP_SIZE) || offset + len <= i_size_read(inode))
 910		return 0;
 911	*new_size = offset + len;
 912	return inode_newsize_ok(inode, *new_size);
 913}
 914
 915static int
 916xfs_falloc_setsize(
 917	struct file		*file,
 918	loff_t			new_size)
 919{
 920	struct iattr iattr = {
 921		.ia_valid	= ATTR_SIZE,
 922		.ia_size	= new_size,
 923	};
 924
 925	if (!new_size)
 926		return 0;
 927	return xfs_vn_setattr_size(file_mnt_idmap(file), file_dentry(file),
 928			&iattr);
 929}
 930
 931static int
 932xfs_falloc_collapse_range(
 933	struct file		*file,
 934	loff_t			offset,
 935	loff_t			len)
 936{
 937	struct inode		*inode = file_inode(file);
 938	loff_t			new_size = i_size_read(inode) - len;
 939	int			error;
 940
 941	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
 942		return -EINVAL;
 943
 944	/*
 945	 * There is no need for a collapse range to overlap EOF; in that case
 946	 * it is effectively a truncate operation.
 947	 */
 948	if (offset + len >= i_size_read(inode))
 949		return -EINVAL;
 950
 951	error = xfs_collapse_file_space(XFS_I(inode), offset, len);
 952	if (error)
 953		return error;
 954	return xfs_falloc_setsize(file, new_size);
 955}
 956
 957static int
 958xfs_falloc_insert_range(
 959	struct file		*file,
 960	loff_t			offset,
 961	loff_t			len)
 962{
 963	struct inode		*inode = file_inode(file);
 964	loff_t			isize = i_size_read(inode);
 965	int			error;
 966
 967	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
 968		return -EINVAL;
 969
 970	/*
 971	 * New inode size must not exceed ->s_maxbytes, accounting for
 972	 * possible signed overflow.
 973	 */
 974	if (inode->i_sb->s_maxbytes - isize < len)
 975		return -EFBIG;
 976
 977	/* Offset should be less than i_size */
 978	if (offset >= isize)
 979		return -EINVAL;
 980
 981	error = xfs_falloc_setsize(file, isize + len);
 982	if (error)
 983		return error;
 984
 985	/*
 986	 * Perform hole insertion now that the file size has been updated so
 987	 * that if we crash during the operation we don't leave shifted extents
 988	 * past EOF and hence lose access to the data that is contained within
 989	 * them.
 990	 */
 991	return xfs_insert_file_space(XFS_I(inode), offset, len);
 992}
 993
 994/*
 995 * Punch a hole and prealloc the range.  We use a hole punch rather than
 996 * unwritten extent conversion for two reasons:
 997 *
 998 *   1.) Hole punch handles partial block zeroing for us.
 999 *   2.) If prealloc returns ENOSPC, the file range is still zero-valued by
1000 *	 virtue of the hole punch.
1001 */
1002static int
1003xfs_falloc_zero_range(
1004	struct file		*file,
1005	int			mode,
1006	loff_t			offset,
1007	loff_t			len)
1008{
1009	struct inode		*inode = file_inode(file);
1010	unsigned int		blksize = i_blocksize(inode);
1011	loff_t			new_size = 0;
1012	int			error;
1013
1014	trace_xfs_zero_file_space(XFS_I(inode));
1015
1016	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1017	if (error)
1018		return error;
1019
1020	error = xfs_free_file_space(XFS_I(inode), offset, len);
1021	if (error)
1022		return error;
1023
1024	len = round_up(offset + len, blksize) - round_down(offset, blksize);
1025	offset = round_down(offset, blksize);
1026	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1027	if (error)
1028		return error;
1029	return xfs_falloc_setsize(file, new_size);
1030}
1031
1032static int
1033xfs_falloc_unshare_range(
1034	struct file		*file,
1035	int			mode,
1036	loff_t			offset,
1037	loff_t			len)
1038{
1039	struct inode		*inode = file_inode(file);
1040	loff_t			new_size = 0;
1041	int			error;
1042
1043	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1044	if (error)
1045		return error;
1046
1047	error = xfs_reflink_unshare(XFS_I(inode), offset, len);
1048	if (error)
1049		return error;
1050
1051	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1052	if (error)
1053		return error;
1054	return xfs_falloc_setsize(file, new_size);
1055}
1056
1057static int
1058xfs_falloc_allocate_range(
1059	struct file		*file,
1060	int			mode,
1061	loff_t			offset,
1062	loff_t			len)
1063{
1064	struct inode		*inode = file_inode(file);
1065	loff_t			new_size = 0;
1066	int			error;
1067
1068	/*
 1069	 * In always_cow mode we can't use preallocations and thus should not
1070	 * create them.
1071	 */
1072	if (xfs_is_always_cow_inode(XFS_I(inode)))
1073		return -EOPNOTSUPP;
1074
1075	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
1076	if (error)
1077		return error;
1078
1079	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
1080	if (error)
1081		return error;
1082	return xfs_falloc_setsize(file, new_size);
1083}
1084
1085#define	XFS_FALLOC_FL_SUPPORTED						\
1086		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
1087		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
1088		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
1089
1090STATIC long
1091xfs_file_fallocate(
1092	struct file		*file,
1093	int			mode,
1094	loff_t			offset,
1095	loff_t			len)
1096{
1097	struct inode		*inode = file_inode(file);
1098	struct xfs_inode	*ip = XFS_I(inode);
1099	long			error;
1100	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1101
1102	if (!S_ISREG(inode->i_mode))
1103		return -EINVAL;
1104	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
1105		return -EOPNOTSUPP;
1106
1107	xfs_ilock(ip, iolock);
1108	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
1109	if (error)
1110		goto out_unlock;
1111
1112	/*
1113	 * Must wait for all AIO to complete before we continue as AIO can
1114	 * change the file size on completion without holding any locks we
1115	 * currently hold. We must do this first because AIO can update both
1116	 * the on disk and in memory inode sizes, and the operations that follow
1117	 * require the in-memory size to be fully up-to-date.
1118	 */
1119	inode_dio_wait(inode);
1120
1121	error = file_modified(file);
1122	if (error)
1123		goto out_unlock;
1124
1125	switch (mode & FALLOC_FL_MODE_MASK) {
1126	case FALLOC_FL_PUNCH_HOLE:
1127		error = xfs_free_file_space(ip, offset, len);
1128		break;
1129	case FALLOC_FL_COLLAPSE_RANGE:
1130		error = xfs_falloc_collapse_range(file, offset, len);
1131		break;
1132	case FALLOC_FL_INSERT_RANGE:
1133		error = xfs_falloc_insert_range(file, offset, len);
1134		break;
1135	case FALLOC_FL_ZERO_RANGE:
1136		error = xfs_falloc_zero_range(file, mode, offset, len);
1137		break;
1138	case FALLOC_FL_UNSHARE_RANGE:
1139		error = xfs_falloc_unshare_range(file, mode, offset, len);
1140		break;
1141	case FALLOC_FL_ALLOCATE_RANGE:
1142		error = xfs_falloc_allocate_range(file, mode, offset, len);
1143		break;
1144	default:
1145		error = -EOPNOTSUPP;
1146		break;
1147	}
1148
1149	if (!error && xfs_file_sync_writes(file))
1150		error = xfs_log_force_inode(ip);
1151
1152out_unlock:
1153	xfs_iunlock(ip, iolock);
1154	return error;
1155}
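
/*
 * Illustrative sketch (editorial addition): each case in the switch above
 * maps to a fallocate(2) mode.  Two common ones from userspace, punching a
 * hole (which requires KEEP_SIZE) and plain preallocation (mode 0, the
 * FALLOC_FL_ALLOCATE_RANGE case):
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int falloc_examples(int fd)
{
	/* deallocate the first MiB; i_size is left unchanged */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20))
		return -1;
	/* preallocate 16 MiB of space starting at offset 0 */
	return fallocate(fd, 0, 0, 16 << 20);
}
#endif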
1156
1157STATIC int
1158xfs_file_fadvise(
1159	struct file	*file,
1160	loff_t		start,
1161	loff_t		end,
1162	int		advice)
1163{
1164	struct xfs_inode *ip = XFS_I(file_inode(file));
1165	int ret;
1166	int lockflags = 0;
1167
1168	/*
1169	 * Operations creating pages in page cache need protection from hole
1170	 * punching and similar ops
1171	 */
1172	if (advice == POSIX_FADV_WILLNEED) {
1173		lockflags = XFS_IOLOCK_SHARED;
1174		xfs_ilock(ip, lockflags);
1175	}
1176	ret = generic_fadvise(file, start, end, advice);
1177	if (lockflags)
1178		xfs_iunlock(ip, lockflags);
1179	return ret;
1180}
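
/*
 * Illustrative sketch (editorial addition): the POSIX_FADV_WILLNEED case
 * above is driven by posix_fadvise(2); the shared iolock taken there keeps
 * readahead page instantiation from racing with hole punching.  Hypothetical
 * userspace usage:
 */
#if 0	/* example only; never compiled */
#include <fcntl.h>

int prefetch_range(int fd, off_t offset, off_t length)
{
	/* hint: populate the page cache for [offset, offset + length) */
	return posix_fadvise(fd, offset, length, POSIX_FADV_WILLNEED);
}
#endif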
1181
1182STATIC loff_t
1183xfs_file_remap_range(
1184	struct file		*file_in,
1185	loff_t			pos_in,
1186	struct file		*file_out,
1187	loff_t			pos_out,
1188	loff_t			len,
1189	unsigned int		remap_flags)
1190{
1191	struct inode		*inode_in = file_inode(file_in);
1192	struct xfs_inode	*src = XFS_I(inode_in);
1193	struct inode		*inode_out = file_inode(file_out);
1194	struct xfs_inode	*dest = XFS_I(inode_out);
1195	struct xfs_mount	*mp = src->i_mount;
1196	loff_t			remapped = 0;
1197	xfs_extlen_t		cowextsize;
1198	int			ret;
1199
1200	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1201		return -EINVAL;
1202
1203	if (!xfs_has_reflink(mp))
1204		return -EOPNOTSUPP;
1205
1206	if (xfs_is_shutdown(mp))
1207		return -EIO;
1208
1209	/* Prepare and then clone file data. */
1210	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1211			&len, remap_flags);
1212	if (ret || len == 0)
1213		return ret;
1214
1215	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1216
1217	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1218			&remapped);
1219	if (ret)
1220		goto out_unlock;
1221
1222	/*
1223	 * Carry the cowextsize hint from src to dest if we're sharing the
1224	 * entire source file to the entire destination file, the source file
1225	 * has a cowextsize hint, and the destination file does not.
1226	 */
1227	cowextsize = 0;
1228	if (pos_in == 0 && len == i_size_read(inode_in) &&
1229	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1230	    pos_out == 0 && len >= i_size_read(inode_out) &&
1231	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1232		cowextsize = src->i_cowextsize;
1233
1234	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1235			remap_flags);
1236	if (ret)
1237		goto out_unlock;
1238
1239	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1240		xfs_log_force_inode(dest);
1241out_unlock:
1242	xfs_iunlock2_remapping(src, dest);
1243	if (ret)
1244		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1245	/*
1246	 * If the caller did not set CAN_SHORTEN, then it is not prepared to
1247	 * handle partial results -- either the whole remap succeeds, or we
1248	 * must say why it did not.  In this case, any error should be returned
1249	 * to the caller.
1250	 */
1251	if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
1252		return ret;
1253	return remapped > 0 ? remapped : ret;
1254}
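
/*
 * Illustrative sketch (editorial addition): xfs_file_remap_range() sits
 * behind the FICLONE/FICLONERANGE ioctls (and reflink-based
 * copy_file_range(2)).  Cloning one whole file onto another from userspace:
 */
#if 0	/* example only; never compiled */
#include <sys/ioctl.h>
#include <linux/fs.h>

int clone_file(int src_fd, int dest_fd)
{
	/* shares extents; writes to either file trigger copy-on-write */
	return ioctl(dest_fd, FICLONE, src_fd);
}
#endif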
1255
1256STATIC int
1257xfs_file_open(
1258	struct inode	*inode,
1259	struct file	*file)
1260{
1261	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1262		return -EIO;
1263	file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
1264	if (xfs_inode_can_atomicwrite(XFS_I(inode)))
1265		file->f_mode |= FMODE_CAN_ATOMIC_WRITE;
1266	return generic_file_open(inode, file);
1267}
1268
1269STATIC int
1270xfs_dir_open(
1271	struct inode	*inode,
1272	struct file	*file)
1273{
1274	struct xfs_inode *ip = XFS_I(inode);
1275	unsigned int	mode;
1276	int		error;
1277
1278	if (xfs_is_shutdown(ip->i_mount))
1279		return -EIO;
1280	error = generic_file_open(inode, file);
1281	if (error)
1282		return error;
1283
1284	/*
1285	 * If there are any blocks, read-ahead block 0 as we're almost
1286	 * certain to have the next operation be a read there.
1287	 */
1288	mode = xfs_ilock_data_map_shared(ip);
1289	if (ip->i_df.if_nextents > 0)
1290		error = xfs_dir3_data_readahead(ip, 0, 0);
1291	xfs_iunlock(ip, mode);
1292	return error;
1293}
1294
1295/*
1296 * Don't bother propagating errors.  We're just doing cleanup, and the caller
1297 * ignores the return value anyway.
1298 */
1299STATIC int
1300xfs_file_release(
1301	struct inode		*inode,
1302	struct file		*file)
1303{
1304	struct xfs_inode	*ip = XFS_I(inode);
1305	struct xfs_mount	*mp = ip->i_mount;
1306
1307	/*
1308	 * If this is a read-only mount or the file system has been shut down,
1309	 * don't generate I/O.
1310	 */
1311	if (xfs_is_readonly(mp) || xfs_is_shutdown(mp))
1312		return 0;
1313
1314	/*
1315	 * If we previously truncated this file and removed old data in the
1316	 * process, we want to initiate "early" writeout on the last close.
1317	 * This is an attempt to combat the notorious NULL files problem which
1318	 * is particularly noticeable from a truncate down, buffered (re-)write
1319	 * (delalloc), followed by a crash.  What we are effectively doing here
1320	 * is significantly reducing the time window where we'd otherwise be
1321	 * exposed to that problem.
1322	 */
1323	if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
1324		xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED);
1325		if (ip->i_delayed_blks > 0)
1326			filemap_flush(inode->i_mapping);
1327	}
1328
1329	/*
1330	 * XFS aggressively preallocates post-EOF space to generate contiguous
1331	 * allocations for writers that append to the end of the file.
1332	 *
1333	 * To support workloads that close and reopen the file frequently, these
1334	 * preallocations usually persist after a close unless it is the first
1335	 * close for the inode.  This is a tradeoff to generate tightly packed
1336	 * data layouts for unpacking tarballs or similar archives that write
1337	 * one file after another without going back to it while keeping the
1338	 * preallocation for files that have recurring open/write/close cycles.
1339	 *
1340	 * This heuristic is skipped for inodes with the append-only flag as
1341	 * that flag is rather pointless for inodes written only once.
1342	 *
1343	 * There is no point in freeing blocks here for open but unlinked files
1344	 * as they will be taken care of by the inactivation path soon.
1345	 *
1346	 * When releasing a read-only context, don't flush data or trim post-EOF
1347	 * blocks.  This avoids open/read/close workloads from removing EOF
1348	 * blocks that other writers depend upon to reduce fragmentation.
1349	 *
1350	 * If we can't get the iolock just skip truncating the blocks past EOF
1351	 * because we could deadlock with the mmap_lock otherwise. We'll get
1352	 * another chance to drop them once the last reference to the inode is
1353	 * dropped, so we'll never leak blocks permanently.
1354	 */
1355	if (inode->i_nlink &&
1356	    (file->f_mode & FMODE_WRITE) &&
1357	    !(ip->i_diflags & XFS_DIFLAG_APPEND) &&
1358	    !xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
1359	    xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1360		if (xfs_can_free_eofblocks(ip) &&
1361		    !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
1362			xfs_free_eofblocks(ip);
1363		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1364	}
1365
1366	return 0;
1367}
1368
1369STATIC int
1370xfs_file_readdir(
1371	struct file	*file,
1372	struct dir_context *ctx)
1373{
1374	struct inode	*inode = file_inode(file);
1375	xfs_inode_t	*ip = XFS_I(inode);
1376	size_t		bufsize;
1377
1378	/*
 1379	 * The Linux API doesn't pass the total size of the buffer we
 1380	 * read into down to the filesystem.  With the filldir concept
 1381	 * it's not needed for correct information, but the XFS dir2 leaf
 1382	 * code wants an estimate of the buffer size to calculate its
 1383	 * readahead window and size the buffers used for mapping to
1384	 * physical blocks.
1385	 *
1386	 * Try to give it an estimate that's good enough, maybe at some
1387	 * point we can change the ->readdir prototype to include the
1388	 * buffer size.  For now we use the current glibc buffer size.
1389	 */
1390	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1391
1392	return xfs_readdir(NULL, ip, ctx, bufsize);
1393}
1394
1395STATIC loff_t
1396xfs_file_llseek(
1397	struct file	*file,
1398	loff_t		offset,
1399	int		whence)
1400{
1401	struct inode		*inode = file->f_mapping->host;
1402
1403	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1404		return -EIO;
1405
1406	switch (whence) {
1407	default:
1408		return generic_file_llseek(file, offset, whence);
1409	case SEEK_HOLE:
1410		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1411		break;
1412	case SEEK_DATA:
1413		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1414		break;
1415	}
1416
1417	if (offset < 0)
1418		return offset;
1419	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1420}
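
/*
 * Illustrative sketch (editorial addition): the SEEK_HOLE/SEEK_DATA cases
 * above let userspace walk a sparse file's allocated regions without
 * reading it.  A hypothetical scanner printing every data segment:
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>

void print_data_segments(int fd)
{
	off_t data = 0, hole;

	/* lseek() returns -1/ENXIO once no further data exists */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);	/* end of this run */
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
}
#endif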
1421
1422static inline vm_fault_t
1423xfs_dax_fault_locked(
1424	struct vm_fault		*vmf,
1425	unsigned int		order,
1426	bool			write_fault)
1427{
1428	vm_fault_t		ret;
1429	pfn_t			pfn;
1430
1431	if (!IS_ENABLED(CONFIG_FS_DAX)) {
1432		ASSERT(0);
1433		return VM_FAULT_SIGBUS;
1434	}
1435	ret = dax_iomap_fault(vmf, order, &pfn, NULL,
1436			(write_fault && !vmf->cow_page) ?
1437				&xfs_dax_write_iomap_ops :
1438				&xfs_read_iomap_ops);
1439	if (ret & VM_FAULT_NEEDDSYNC)
1440		ret = dax_finish_sync_fault(vmf, order, pfn);
1441	return ret;
1442}
1443
1444static vm_fault_t
1445xfs_dax_read_fault(
1446	struct vm_fault		*vmf,
1447	unsigned int		order)
1448{
1449	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
1450	vm_fault_t		ret;
1451
1452	trace_xfs_read_fault(ip, order);
1453
1454	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1455	ret = xfs_dax_fault_locked(vmf, order, false);
1456	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1457
1458	return ret;
1459}
1460
1461/*
1462 * Locking for serialisation of IO during page faults. This results in a lock
1463 * ordering of:
1464 *
1465 * mmap_lock (MM)
1466 *   sb_start_pagefault(vfs, freeze)
1467 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1468 *       page_lock (MM)
1469 *         i_lock (XFS - extent map serialisation)
1470 */
1471static vm_fault_t
1472xfs_write_fault(
1473	struct vm_fault		*vmf,
1474	unsigned int		order)
1475{
1476	struct inode		*inode = file_inode(vmf->vma->vm_file);
1477	struct xfs_inode	*ip = XFS_I(inode);
1478	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
1479	vm_fault_t		ret;
1480
1481	trace_xfs_write_fault(ip, order);
1482
1483	sb_start_pagefault(inode->i_sb);
1484	file_update_time(vmf->vma->vm_file);
1485
1486	/*
1487	 * Normally we only need the shared mmaplock, but if a reflink remap is
1488	 * in progress we take the exclusive lock to wait for the remap to
1489	 * finish before taking a write fault.
1490	 */
1491	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1492	if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
1493		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1494		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1495		lock_mode = XFS_MMAPLOCK_EXCL;
1496	}
1497
1498	if (IS_DAX(inode))
1499		ret = xfs_dax_fault_locked(vmf, order, true);
1500	else
1501		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
1502	xfs_iunlock(ip, lock_mode);
1503
1504	sb_end_pagefault(inode->i_sb);
1505	return ret;
1506}
1507
1508static inline bool
1509xfs_is_write_fault(
1510	struct vm_fault		*vmf)
1511{
1512	return (vmf->flags & FAULT_FLAG_WRITE) &&
1513	       (vmf->vma->vm_flags & VM_SHARED);
1514}
1515
1516static vm_fault_t
1517xfs_filemap_fault(
1518	struct vm_fault		*vmf)
1519{
1520	struct inode		*inode = file_inode(vmf->vma->vm_file);
1521
1522	/* DAX can shortcut the normal fault path on write faults! */
1523	if (IS_DAX(inode)) {
1524		if (xfs_is_write_fault(vmf))
1525			return xfs_write_fault(vmf, 0);
1526		return xfs_dax_read_fault(vmf, 0);
1527	}
1528
1529	trace_xfs_read_fault(XFS_I(inode), 0);
1530	return filemap_fault(vmf);
1531}
1532
1533static vm_fault_t
1534xfs_filemap_huge_fault(
1535	struct vm_fault		*vmf,
1536	unsigned int		order)
1537{
1538	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1539		return VM_FAULT_FALLBACK;
1540
1541	/* DAX can shortcut the normal fault path on write faults! */
1542	if (xfs_is_write_fault(vmf))
1543		return xfs_write_fault(vmf, order);
1544	return xfs_dax_read_fault(vmf, order);
1545}
1546
1547static vm_fault_t
1548xfs_filemap_page_mkwrite(
1549	struct vm_fault		*vmf)
1550{
1551	return xfs_write_fault(vmf, 0);
1552}
1553
1554/*
1555 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1556 * on write faults. In reality, it needs to serialise against truncate and
 1557 * prepare memory for writing, so handle it as a standard write fault.
1558 */
1559static vm_fault_t
1560xfs_filemap_pfn_mkwrite(
1561	struct vm_fault		*vmf)
1562{
1563	return xfs_write_fault(vmf, 0);
1564}
1565
1566static const struct vm_operations_struct xfs_file_vm_ops = {
1567	.fault		= xfs_filemap_fault,
1568	.huge_fault	= xfs_filemap_huge_fault,
1569	.map_pages	= filemap_map_pages,
1570	.page_mkwrite	= xfs_filemap_page_mkwrite,
1571	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1572};
1573
1574STATIC int
1575xfs_file_mmap(
1576	struct file		*file,
1577	struct vm_area_struct	*vma)
1578{
1579	struct inode		*inode = file_inode(file);
1580	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1581
1582	/*
 1583	 * We don't support synchronous mappings for non-DAX files, nor for
 1584	 * DAX files if the underlying dax_device is not synchronous.
1585	 */
1586	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1587		return -EOPNOTSUPP;
1588
1589	file_accessed(file);
1590	vma->vm_ops = &xfs_file_vm_ops;
1591	if (IS_DAX(inode))
1592		vm_flags_set(vma, VM_HUGEPAGE);
1593	return 0;
1594}
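
/*
 * Illustrative sketch (editorial addition): the daxdev_mapping_supported()
 * check above is what gates MAP_SYNC.  On a DAX mount backed by a
 * synchronous dax_device, userspace can ask for a mapping whose page tables
 * only ever point at persistent storage, so stores followed by CPU cache
 * flushes are durable without fsync().  Hypothetical usage; MAP_SYNC may
 * need <linux/mman.h> if the libc headers lack it:
 */
#if 0	/* example only; never compiled */
#define _GNU_SOURCE
#include <sys/mman.h>

void *map_sync_writable(int fd, size_t len)
{
	/* fails with EOPNOTSUPP when the file or device can't honour it */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}
#endif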
1595
1596const struct file_operations xfs_file_operations = {
1597	.llseek		= xfs_file_llseek,
1598	.read_iter	= xfs_file_read_iter,
1599	.write_iter	= xfs_file_write_iter,
1600	.splice_read	= xfs_file_splice_read,
1601	.splice_write	= iter_file_splice_write,
1602	.iopoll		= iocb_bio_iopoll,
1603	.unlocked_ioctl	= xfs_file_ioctl,
1604#ifdef CONFIG_COMPAT
1605	.compat_ioctl	= xfs_file_compat_ioctl,
1606#endif
1607	.mmap		= xfs_file_mmap,
1608	.open		= xfs_file_open,
1609	.release	= xfs_file_release,
1610	.fsync		= xfs_file_fsync,
1611	.get_unmapped_area = thp_get_unmapped_area,
1612	.fallocate	= xfs_file_fallocate,
1613	.fadvise	= xfs_file_fadvise,
1614	.remap_file_range = xfs_file_remap_range,
1615	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
1616			  FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE,
1617};
1618
1619const struct file_operations xfs_dir_file_operations = {
1620	.open		= xfs_dir_open,
1621	.read		= generic_read_dir,
1622	.iterate_shared	= xfs_file_readdir,
1623	.llseek		= generic_file_llseek,
1624	.unlocked_ioctl	= xfs_file_ioctl,
1625#ifdef CONFIG_COMPAT
1626	.compat_ioctl	= xfs_file_compat_ioctl,
1627#endif
1628	.fsync		= xfs_dir_fsync,
1629};