/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

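	/*
	 * Unless the caller asked for an invisible update, treat this like
	 * any other allocating write: clear the setuid/setgid bits and
	 * bump the timestamps.
	 */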
	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
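		/*
		 * For fdatasync we can skip the log force when nothing but
		 * the timestamps is dirty; everything else has to be pushed
		 * to the journal.
		 */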
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

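	/*
	 * The IO lock is taken shared so direct reads can run concurrently
	 * with each other, while still being excluded by operations that
	 * take the lock exclusively (e.g. truncate).
	 */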
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

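	/*
	 * For RWF_NOWAIT we must not sleep on the IO lock; trylock and
	 * return -EAGAIN so the caller can retry from a context where
	 * blocking is acceptable.
	 */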
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
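				/*
				 * generic_write_checks() may have shortened
				 * the iter on the first pass; restore the
				 * original count before redoing the checks.
				 */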
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

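	/*
	 * For copy-on-write the new blocks were allocated in the COW fork,
	 * so remap them into the data fork now that the write has completed.
	 */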
	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
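
	/*
	 * The write checks may have shortened the iter (e.g. to honour
	 * s_maxbytes or rlimits), so re-read the remaining count.
	 */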
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/*
		 * If we are going to wait for other DIO to finish, bail.
		 * Drop the iolock on the way out rather than leaking it.
		 */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

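	/*
	 * Buffered writes can block on locks, memory reclaim and writeback,
	 * so RWF_NOWAIT cannot be honoured here.
	 */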
	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for the collapse range to overlap EOF;
		 * in that case the operation is effectively a truncate.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	struct inode	*srci = file_inode(src_file);
	u64		max_dedupe;
	int		error;

	/*
	 * Since we have to read all these pages in to compare them, cut
	 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
	 * That means we won't do more than MAX_RW_COUNT IO per request.
	 */
	max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
	if (len > max_dedupe)
		len = max_dedupe;
	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static int
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

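		/*
		 * For synchronous (MAP_SYNC) mappings the on-disk metadata
		 * for the faulted pfn must be stable before the fault
		 * returns, hence the extra sync step.
		 */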
		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
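	/*
	 * Huge page faults are only supported on DAX mappings; the page
	 * cache path always falls back to PTE-sized faults.
	 */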
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
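	/*
	 * DAX inserts mappings by pfn rather than through the page cache,
	 * which requires VM_MIXEDMAP; VM_HUGEPAGE opts the mapping in to
	 * huge page faults.
	 */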
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};
v4.10.11
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_mount.h"
  25#include "xfs_da_format.h"
  26#include "xfs_da_btree.h"
  27#include "xfs_inode.h"
  28#include "xfs_trans.h"
  29#include "xfs_inode_item.h"
  30#include "xfs_bmap.h"
  31#include "xfs_bmap_util.h"
  32#include "xfs_error.h"
  33#include "xfs_dir2.h"
  34#include "xfs_dir2_priv.h"
  35#include "xfs_ioctl.h"
  36#include "xfs_trace.h"
  37#include "xfs_log.h"
  38#include "xfs_icache.h"
  39#include "xfs_pnfs.h"
  40#include "xfs_iomap.h"
  41#include "xfs_reflink.h"
  42
  43#include <linux/dcache.h>
  44#include <linux/falloc.h>
  45#include <linux/pagevec.h>
  46#include <linux/backing-dev.h>
 
  47
  48static const struct vm_operations_struct xfs_file_vm_ops;
  49
  50/*
  51 * Clear the specified ranges to zero through either the pagecache or DAX.
  52 * Holes and unwritten extents will be left as-is as they already are zeroed.
  53 */
  54int
  55xfs_zero_range(
  56	struct xfs_inode	*ip,
  57	xfs_off_t		pos,
  58	xfs_off_t		count,
  59	bool			*did_zero)
  60{
  61	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
  62}
  63
  64int
  65xfs_update_prealloc_flags(
  66	struct xfs_inode	*ip,
  67	enum xfs_prealloc_flags	flags)
  68{
  69	struct xfs_trans	*tp;
  70	int			error;
  71
  72	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
  73			0, 0, 0, &tp);
  74	if (error)
  75		return error;
  76
  77	xfs_ilock(ip, XFS_ILOCK_EXCL);
  78	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  79
  80	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
  81		VFS_I(ip)->i_mode &= ~S_ISUID;
  82		if (VFS_I(ip)->i_mode & S_IXGRP)
  83			VFS_I(ip)->i_mode &= ~S_ISGID;
  84		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  85	}
  86
  87	if (flags & XFS_PREALLOC_SET)
  88		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
  89	if (flags & XFS_PREALLOC_CLEAR)
  90		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
  91
  92	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  93	if (flags & XFS_PREALLOC_SYNC)
  94		xfs_trans_set_sync(tp);
  95	return xfs_trans_commit(tp);
  96}
  97
  98/*
  99 * Fsync operations on directories are much simpler than on regular files,
 100 * as there is no file data to flush, and thus also no need for explicit
 101 * cache flush operations, and there are no non-transaction metadata updates
 102 * on directories either.
 103 */
 104STATIC int
 105xfs_dir_fsync(
 106	struct file		*file,
 107	loff_t			start,
 108	loff_t			end,
 109	int			datasync)
 110{
 111	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
 112	struct xfs_mount	*mp = ip->i_mount;
 113	xfs_lsn_t		lsn = 0;
 114
 115	trace_xfs_dir_fsync(ip);
 116
 117	xfs_ilock(ip, XFS_ILOCK_SHARED);
 118	if (xfs_ipincount(ip))
 119		lsn = ip->i_itemp->ili_last_lsn;
 120	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 121
 122	if (!lsn)
 123		return 0;
 124	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 125}
 126
 127STATIC int
 128xfs_file_fsync(
 129	struct file		*file,
 130	loff_t			start,
 131	loff_t			end,
 132	int			datasync)
 133{
 134	struct inode		*inode = file->f_mapping->host;
 135	struct xfs_inode	*ip = XFS_I(inode);
 136	struct xfs_mount	*mp = ip->i_mount;
 137	int			error = 0;
 138	int			log_flushed = 0;
 139	xfs_lsn_t		lsn = 0;
 140
 141	trace_xfs_file_fsync(ip);
 142
 143	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 144	if (error)
 145		return error;
 146
 147	if (XFS_FORCED_SHUTDOWN(mp))
 148		return -EIO;
 149
 150	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 151
 152	/*
 153	 * If we have an RT and/or log subvolume we need to make sure to flush
 154	 * the write cache the device used for file data first.  This is to
 155	 * ensure newly written file data make it to disk before logging the new
 156	 * inode size in case of an extending write.
 157	 */
 158	if (XFS_IS_REALTIME_INODE(ip))
 159		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
 160	else if (mp->m_logdev_targp != mp->m_ddev_targp)
 161		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 162
 163	/*
 164	 * All metadata updates are logged, which means that we just have to
 165	 * flush the log up to the latest LSN that touched the inode. If we have
 166	 * concurrent fsync/fdatasync() calls, we need them to all block on the
 167	 * log force before we clear the ili_fsync_fields field. This ensures
 168	 * that we don't get a racing sync operation that does not wait for the
 169	 * metadata to hit the journal before returning. If we race with
 170	 * clearing the ili_fsync_fields, then all that will happen is the log
 171	 * force will do nothing as the lsn will already be on disk. We can't
 172	 * race with setting ili_fsync_fields because that is done under
 173	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
 174	 * until after the ili_fsync_fields is cleared.
 175	 */
 176	xfs_ilock(ip, XFS_ILOCK_SHARED);
 177	if (xfs_ipincount(ip)) {
 178		if (!datasync ||
 179		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
 180			lsn = ip->i_itemp->ili_last_lsn;
 181	}
 182
 183	if (lsn) {
 184		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
 185		ip->i_itemp->ili_fsync_fields = 0;
 186	}
 187	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 188
 189	/*
 190	 * If we only have a single device, and the log force about was
 191	 * a no-op we might have to flush the data device cache here.
 192	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
 193	 * an already allocated file and thus do not have any metadata to
 194	 * commit.
 195	 */
 196	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
 197	    mp->m_logdev_targp == mp->m_ddev_targp)
 198		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 199
 200	return error;
 201}
 202
 203STATIC ssize_t
 204xfs_file_dio_aio_read(
 205	struct kiocb		*iocb,
 206	struct iov_iter		*to)
 207{
 208	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 209	size_t			count = iov_iter_count(to);
 210	ssize_t			ret;
 211
 212	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
 213
 214	if (!count)
 215		return 0; /* skip atime */
 216
 217	file_accessed(iocb->ki_filp);
 218
 219	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 220	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
 221	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 222
 223	return ret;
 224}
 225
 226static noinline ssize_t
 227xfs_file_dax_read(
 228	struct kiocb		*iocb,
 229	struct iov_iter		*to)
 230{
 231	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 232	size_t			count = iov_iter_count(to);
 233	ssize_t			ret = 0;
 234
 235	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
 236
 237	if (!count)
 238		return 0; /* skip atime */
 239
 240	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 
 
 
 
 
 241	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
 242	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 243
 244	file_accessed(iocb->ki_filp);
 245	return ret;
 246}
 247
 248STATIC ssize_t
 249xfs_file_buffered_aio_read(
 250	struct kiocb		*iocb,
 251	struct iov_iter		*to)
 252{
 253	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 254	ssize_t			ret;
 255
 256	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 257
 258	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 
 
 
 
 259	ret = generic_file_read_iter(iocb, to);
 260	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 261
 262	return ret;
 263}
 264
 265STATIC ssize_t
 266xfs_file_read_iter(
 267	struct kiocb		*iocb,
 268	struct iov_iter		*to)
 269{
 270	struct inode		*inode = file_inode(iocb->ki_filp);
 271	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
 272	ssize_t			ret = 0;
 273
 274	XFS_STATS_INC(mp, xs_read_calls);
 275
 276	if (XFS_FORCED_SHUTDOWN(mp))
 277		return -EIO;
 278
 279	if (IS_DAX(inode))
 280		ret = xfs_file_dax_read(iocb, to);
 281	else if (iocb->ki_flags & IOCB_DIRECT)
 282		ret = xfs_file_dio_aio_read(iocb, to);
 283	else
 284		ret = xfs_file_buffered_aio_read(iocb, to);
 285
 286	if (ret > 0)
 287		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 288	return ret;
 289}
 290
 291/*
 292 * Zero any on disk space between the current EOF and the new, larger EOF.
 293 *
 294 * This handles the normal case of zeroing the remainder of the last block in
 295 * the file and the unusual case of zeroing blocks out beyond the size of the
 296 * file.  This second case only happens with fixed size extents and when the
 297 * system crashes before the inode size was updated but after blocks were
 298 * allocated.
 299 *
 300 * Expects the iolock to be held exclusive, and will take the ilock internally.
 301 */
 302int					/* error (positive) */
 303xfs_zero_eof(
 304	struct xfs_inode	*ip,
 305	xfs_off_t		offset,		/* starting I/O offset */
 306	xfs_fsize_t		isize,		/* current inode size */
 307	bool			*did_zeroing)
 308{
 309	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 310	ASSERT(offset > isize);
 311
 312	trace_xfs_zero_eof(ip, isize, offset - isize);
 313	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
 314}
 315
 316/*
 317 * Common pre-write limit and setup checks.
 318 *
 319 * Called with the iolocked held either shared and exclusive according to
 320 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 321 * if called for a direct write beyond i_size.
 322 */
 323STATIC ssize_t
 324xfs_file_aio_write_checks(
 325	struct kiocb		*iocb,
 326	struct iov_iter		*from,
 327	int			*iolock)
 328{
 329	struct file		*file = iocb->ki_filp;
 330	struct inode		*inode = file->f_mapping->host;
 331	struct xfs_inode	*ip = XFS_I(inode);
 332	ssize_t			error = 0;
 333	size_t			count = iov_iter_count(from);
 334	bool			drained_dio = false;
 
 335
 336restart:
 337	error = generic_write_checks(iocb, from);
 338	if (error <= 0)
 339		return error;
 340
 341	error = xfs_break_layouts(inode, iolock);
 342	if (error)
 343		return error;
 344
 345	/*
 346	 * For changing security info in file_remove_privs() we need i_rwsem
 347	 * exclusively.
 348	 */
 349	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 350		xfs_iunlock(ip, *iolock);
 351		*iolock = XFS_IOLOCK_EXCL;
 352		xfs_ilock(ip, *iolock);
 353		goto restart;
 354	}
 355	/*
 356	 * If the offset is beyond the size of the file, we need to zero any
 357	 * blocks that fall between the existing EOF and the start of this
 358	 * write.  If zeroing is needed and we are currently holding the
 359	 * iolock shared, we need to update it to exclusive which implies
 360	 * having to redo all checks before.
 361	 *
 362	 * We need to serialise against EOF updates that occur in IO
 363	 * completions here. We want to make sure that nobody is changing the
 364	 * size while we do this check until we have placed an IO barrier (i.e.
 365	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
 366	 * The spinlock effectively forms a memory barrier once we have the
 367	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
 368	 * and hence be able to correctly determine if we need to run zeroing.
 369	 */
 370	spin_lock(&ip->i_flags_lock);
 371	if (iocb->ki_pos > i_size_read(inode)) {
 372		bool	zero = false;
 373
 374		spin_unlock(&ip->i_flags_lock);
 375		if (!drained_dio) {
 376			if (*iolock == XFS_IOLOCK_SHARED) {
 377				xfs_iunlock(ip, *iolock);
 378				*iolock = XFS_IOLOCK_EXCL;
 379				xfs_ilock(ip, *iolock);
 380				iov_iter_reexpand(from, count);
 381			}
 382			/*
 383			 * We now have an IO submission barrier in place, but
 384			 * AIO can do EOF updates during IO completion and hence
 385			 * we now need to wait for all of them to drain. Non-AIO
 386			 * DIO will have drained before we are given the
 387			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
 388			 * no-op.
 389			 */
 390			inode_dio_wait(inode);
 391			drained_dio = true;
 392			goto restart;
 393		}
 394		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
 
 
 
 395		if (error)
 396			return error;
 397	} else
 398		spin_unlock(&ip->i_flags_lock);
 399
 400	/*
 401	 * Updating the timestamps will grab the ilock again from
 402	 * xfs_fs_dirty_inode, so we have to call it after dropping the
 403	 * lock above.  Eventually we should look into a way to avoid
 404	 * the pointless lock roundtrip.
 405	 */
 406	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
 407		error = file_update_time(file);
 408		if (error)
 409			return error;
 410	}
 411
 412	/*
 413	 * If we're writing the file then make sure to clear the setuid and
 414	 * setgid bits if the process is not being run by root.  This keeps
 415	 * people from modifying setuid and setgid binaries.
 416	 */
 417	if (!IS_NOSEC(inode))
 418		return file_remove_privs(file);
 419	return 0;
 420}
 421
 422static int
 423xfs_dio_write_end_io(
 424	struct kiocb		*iocb,
 425	ssize_t			size,
 426	unsigned		flags)
 427{
 428	struct inode		*inode = file_inode(iocb->ki_filp);
 429	struct xfs_inode	*ip = XFS_I(inode);
 430	loff_t			offset = iocb->ki_pos;
 431	bool			update_size = false;
 432	int			error = 0;
 433
 434	trace_xfs_end_io_direct_write(ip, offset, size);
 435
 436	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 437		return -EIO;
 438
 439	if (size <= 0)
 440		return size;
 441
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 442	/*
 443	 * We need to update the in-core inode size here so that we don't end up
 444	 * with the on-disk inode size being outside the in-core inode size. We
 445	 * have no other method of updating EOF for AIO, so always do it here
 446	 * if necessary.
 447	 *
 448	 * We need to lock the test/set EOF update as we can be racing with
 449	 * other IO completions here to update the EOF. Failing to serialise
 450	 * here can result in EOF moving backwards and Bad Things Happen when
 451	 * that occurs.
 452	 */
 453	spin_lock(&ip->i_flags_lock);
 454	if (offset + size > i_size_read(inode)) {
 455		i_size_write(inode, offset + size);
 456		update_size = true;
 
 
 
 457	}
 458	spin_unlock(&ip->i_flags_lock);
 459
 460	if (flags & IOMAP_DIO_COW) {
 461		error = xfs_reflink_end_cow(ip, offset, size);
 462		if (error)
 463			return error;
 464	}
 465
 466	if (flags & IOMAP_DIO_UNWRITTEN)
 467		error = xfs_iomap_write_unwritten(ip, offset, size);
 468	else if (update_size)
 469		error = xfs_setfilesize(ip, offset, size);
 470
 471	return error;
 472}
 473
 474/*
 475 * xfs_file_dio_aio_write - handle direct IO writes
 476 *
 477 * Lock the inode appropriately to prepare for and issue a direct IO write.
 478 * By separating it from the buffered write path we remove all the tricky to
 479 * follow locking changes and looping.
 480 *
 481 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 482 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 483 * pages are flushed out.
 484 *
 485 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 486 * allowing them to be done in parallel with reads and other direct IO writes.
 487 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 488 * needs to do sub-block zeroing and that requires serialisation against other
 489 * direct IOs to the same block. In this case we need to serialise the
 490 * submission of the unaligned IOs so that we don't get racing block zeroing in
 491 * the dio layer.  To avoid the problem with aio, we also need to wait for
 492 * outstanding IOs to complete so that unwritten extent conversion is completed
 493 * before we try to map the overlapping block. This is currently implemented by
 494 * hitting it with a big hammer (i.e. inode_dio_wait()).
 495 *
 496 * Returns with locks held indicated by @iolock and errors indicated by
 497 * negative return values.
 498 */
 499STATIC ssize_t
 500xfs_file_dio_aio_write(
 501	struct kiocb		*iocb,
 502	struct iov_iter		*from)
 503{
 504	struct file		*file = iocb->ki_filp;
 505	struct address_space	*mapping = file->f_mapping;
 506	struct inode		*inode = mapping->host;
 507	struct xfs_inode	*ip = XFS_I(inode);
 508	struct xfs_mount	*mp = ip->i_mount;
 509	ssize_t			ret = 0;
 510	int			unaligned_io = 0;
 511	int			iolock;
 512	size_t			count = iov_iter_count(from);
 513	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
 514					mp->m_rtdev_targp : mp->m_ddev_targp;
 515
 516	/* DIO must be aligned to device logical sector size */
 517	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 518		return -EINVAL;
 519
 520	/*
 521	 * Don't take the exclusive iolock here unless the I/O is unaligned to
 522	 * the file system block size.  We don't need to consider the EOF
 523	 * extension case here because xfs_file_aio_write_checks() will relock
 524	 * the inode as necessary for EOF zeroing cases and fill out the new
 525	 * inode size as appropriate.
 526	 */
 527	if ((iocb->ki_pos & mp->m_blockmask) ||
 528	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
 529		unaligned_io = 1;
 530
 531		/*
 532		 * We can't properly handle unaligned direct I/O to reflink
 533		 * files yet, as we can't unshare a partial block.
 534		 */
 535		if (xfs_is_reflink_inode(ip)) {
 536			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
 537			return -EREMCHG;
 538		}
 539		iolock = XFS_IOLOCK_EXCL;
 540	} else {
 541		iolock = XFS_IOLOCK_SHARED;
 542	}
 543
 544	xfs_ilock(ip, iolock);
 
 
 
 
 
 545
 546	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 547	if (ret)
 548		goto out;
 549	count = iov_iter_count(from);
 550
 551	/*
 552	 * If we are doing unaligned IO, wait for all other IO to drain,
 553	 * otherwise demote the lock if we had to take the exclusive lock
 554	 * for other reasons in xfs_file_aio_write_checks.
 555	 */
 556	if (unaligned_io)
 557		inode_dio_wait(inode);
 558	else if (iolock == XFS_IOLOCK_EXCL) {
 
 
 
 
 
 
 559		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 560		iolock = XFS_IOLOCK_SHARED;
 561	}
 562
 563	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
 564
 565	/* If this is a block-aligned directio CoW, remap immediately. */
 566	if (xfs_is_reflink_inode(ip) && !unaligned_io) {
 567		ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
 568		if (ret)
 569			goto out;
 570	}
 571
 572	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
 573out:
 574	xfs_iunlock(ip, iolock);
 575
 576	/*
 577	 * No fallback to buffered IO on errors for XFS, direct IO will either
 578	 * complete fully or fail.
 579	 */
 580	ASSERT(ret < 0 || ret == count);
 581	return ret;
 582}
 583
 584static noinline ssize_t
 585xfs_file_dax_write(
 586	struct kiocb		*iocb,
 587	struct iov_iter		*from)
 588{
 589	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 590	struct xfs_inode	*ip = XFS_I(inode);
 591	int			iolock = XFS_IOLOCK_EXCL;
 592	ssize_t			ret, error = 0;
 593	size_t			count;
 594	loff_t			pos;
 595
 596	xfs_ilock(ip, iolock);
 
 
 
 
 
 
 597	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 598	if (ret)
 599		goto out;
 600
 601	pos = iocb->ki_pos;
 602	count = iov_iter_count(from);
 603
 604	trace_xfs_file_dax_write(ip, count, pos);
 605	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 606	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 607		i_size_write(inode, iocb->ki_pos);
 608		error = xfs_setfilesize(ip, pos, ret);
 609	}
 610out:
 611	xfs_iunlock(ip, iolock);
 612	return error ? error : ret;
 613}
 614
 615STATIC ssize_t
 616xfs_file_buffered_aio_write(
 617	struct kiocb		*iocb,
 618	struct iov_iter		*from)
 619{
 620	struct file		*file = iocb->ki_filp;
 621	struct address_space	*mapping = file->f_mapping;
 622	struct inode		*inode = mapping->host;
 623	struct xfs_inode	*ip = XFS_I(inode);
 624	ssize_t			ret;
 625	int			enospc = 0;
 626	int			iolock;
 627
 
 
 
 628write_retry:
 629	iolock = XFS_IOLOCK_EXCL;
 630	xfs_ilock(ip, iolock);
 631
 632	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 633	if (ret)
 634		goto out;
 635
 636	/* We can write back this queue in page reclaim */
 637	current->backing_dev_info = inode_to_bdi(inode);
 638
 639	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
 640	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 641	if (likely(ret >= 0))
 642		iocb->ki_pos += ret;
 643
 644	/*
 645	 * If we hit a space limit, try to free up some lingering preallocated
 646	 * space before returning an error. In the case of ENOSPC, first try to
 647	 * write back all dirty inodes to free up some of the excess reserved
 648	 * metadata space. This reduces the chances that the eofblocks scan
 649	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 650	 * also behaves as a filter to prevent too many eofblocks scans from
 651	 * running at the same time.
 652	 */
 653	if (ret == -EDQUOT && !enospc) {
 654		xfs_iunlock(ip, iolock);
 655		enospc = xfs_inode_free_quota_eofblocks(ip);
 656		if (enospc)
 657			goto write_retry;
 658		enospc = xfs_inode_free_quota_cowblocks(ip);
 659		if (enospc)
 660			goto write_retry;
 661		iolock = 0;
 662	} else if (ret == -ENOSPC && !enospc) {
 663		struct xfs_eofblocks eofb = {0};
 664
 665		enospc = 1;
 666		xfs_flush_inodes(ip->i_mount);
 667
 668		xfs_iunlock(ip, iolock);
 669		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 670		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
 671		goto write_retry;
 672	}
 673
 674	current->backing_dev_info = NULL;
 675out:
 676	if (iolock)
 677		xfs_iunlock(ip, iolock);
 678	return ret;
 679}
 680
 681STATIC ssize_t
 682xfs_file_write_iter(
 683	struct kiocb		*iocb,
 684	struct iov_iter		*from)
 685{
 686	struct file		*file = iocb->ki_filp;
 687	struct address_space	*mapping = file->f_mapping;
 688	struct inode		*inode = mapping->host;
 689	struct xfs_inode	*ip = XFS_I(inode);
 690	ssize_t			ret;
 691	size_t			ocount = iov_iter_count(from);
 692
 693	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 694
 695	if (ocount == 0)
 696		return 0;
 697
 698	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 699		return -EIO;
 700
 701	if (IS_DAX(inode))
 702		ret = xfs_file_dax_write(iocb, from);
 703	else if (iocb->ki_flags & IOCB_DIRECT) {
 704		/*
 705		 * Allow a directio write to fall back to a buffered
 706		 * write *only* in the case that we're doing a reflink
 707		 * CoW.  In all other directio scenarios we do not
 708		 * allow an operation to fall back to buffered mode.
 709		 */
 710		ret = xfs_file_dio_aio_write(iocb, from);
 711		if (ret == -EREMCHG)
 712			goto buffered;
 713	} else {
 714buffered:
 715		ret = xfs_file_buffered_aio_write(iocb, from);
 716	}
 717
 718	if (ret > 0) {
 719		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 720
 721		/* Handle various SYNC-type writes */
 722		ret = generic_write_sync(iocb, ret);
 723	}
 724	return ret;
 725}
 726
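/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): the generic_write_sync() call above is what makes each
 * successful write durable when the file is opened with O_DSYNC.  The
 * path and payload are placeholders.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void dsync_write(void)
{
	int fd = open("/mnt/xfs/journal", O_WRONLY | O_CREAT | O_DSYNC, 0644);

	if (fd >= 0) {
		/* returns only once the data (and any size update) is stable */
		write(fd, "record\n", 7);
		close(fd);
	}
}
#endif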
 727#define	XFS_FALLOC_FL_SUPPORTED						\
 728		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 729		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
 730		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
 731
 732STATIC long
 733xfs_file_fallocate(
 734	struct file		*file,
 735	int			mode,
 736	loff_t			offset,
 737	loff_t			len)
 738{
 739	struct inode		*inode = file_inode(file);
 740	struct xfs_inode	*ip = XFS_I(inode);
 741	long			error;
 742	enum xfs_prealloc_flags	flags = 0;
 743	uint			iolock = XFS_IOLOCK_EXCL;
 744	loff_t			new_size = 0;
 745	bool			do_file_insert = false;
 746
 747	if (!S_ISREG(inode->i_mode))
 748		return -EINVAL;
 749	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
 750		return -EOPNOTSUPP;
 751
 752	xfs_ilock(ip, iolock);
 753	error = xfs_break_layouts(inode, &iolock);
 754	if (error)
 755		goto out_unlock;
 756
 757	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 758	iolock |= XFS_MMAPLOCK_EXCL;
 759
 760	if (mode & FALLOC_FL_PUNCH_HOLE) {
 761		error = xfs_free_file_space(ip, offset, len);
 762		if (error)
 763			goto out_unlock;
 764	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
 765		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 766
 767		if (offset & blksize_mask || len & blksize_mask) {
 768			error = -EINVAL;
 769			goto out_unlock;
 770		}
 771
 772		/*
 773		 * There is no need for a collapse range to overlap EOF;
 774		 * if it did, it would effectively be a truncate operation.
 775		 */
 776		if (offset + len >= i_size_read(inode)) {
 777			error = -EINVAL;
 778			goto out_unlock;
 779		}
 780
 781		new_size = i_size_read(inode) - len;
 782
 783		error = xfs_collapse_file_space(ip, offset, len);
 784		if (error)
 785			goto out_unlock;
 786	} else if (mode & FALLOC_FL_INSERT_RANGE) {
 787		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 788
 789		new_size = i_size_read(inode) + len;
 790		if (offset & blksize_mask || len & blksize_mask) {
 791			error = -EINVAL;
 792			goto out_unlock;
 793		}
 794
 795		/* check the new inode size does not exceed the maximum file size */
 796		if (new_size > inode->i_sb->s_maxbytes) {
 797			error = -EFBIG;
 798			goto out_unlock;
 799		}
 800
 801		/* Offset should be less than i_size */
 802		if (offset >= i_size_read(inode)) {
 803			error = -EINVAL;
 804			goto out_unlock;
 805		}
 806		do_file_insert = true;
 807	} else {
 808		flags |= XFS_PREALLOC_SET;
 809
 810		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 811		    offset + len > i_size_read(inode)) {
 812			new_size = offset + len;
 813			error = inode_newsize_ok(inode, new_size);
 814			if (error)
 815				goto out_unlock;
 816		}
 817
 818		if (mode & FALLOC_FL_ZERO_RANGE)
 819			error = xfs_zero_file_space(ip, offset, len);
 820		else {
 821			if (mode & FALLOC_FL_UNSHARE_RANGE) {
 822				error = xfs_reflink_unshare(ip, offset, len);
 823				if (error)
 824					goto out_unlock;
 825			}
 826			error = xfs_alloc_file_space(ip, offset, len,
 827						     XFS_BMAPI_PREALLOC);
 828		}
 829		if (error)
 830			goto out_unlock;
 831	}
 832
 833	if (file->f_flags & O_DSYNC)
 834		flags |= XFS_PREALLOC_SYNC;
 835
 836	error = xfs_update_prealloc_flags(ip, flags);
 837	if (error)
 838		goto out_unlock;
 839
 840	/* Change file size if needed */
 841	if (new_size) {
 842		struct iattr iattr;
 843
 844		iattr.ia_valid = ATTR_SIZE;
 845		iattr.ia_size = new_size;
 846		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
 847		if (error)
 848			goto out_unlock;
 849	}
 850
 851	/*
 852	 * Perform hole insertion now that the file size has been
 853	 * updated so that if we crash during the operation we don't
 854	 * leave shifted extents past EOF and hence lose access to
 855	 * the data that is contained within them.
 856	 */
 857	if (do_file_insert)
 858		error = xfs_insert_file_space(ip, offset, len);
 859
 860out_unlock:
 861	xfs_iunlock(ip, iolock);
 862	return error;
 863}
 864
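/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): the fallocate(2) modes accepted by XFS_FALLOC_FL_SUPPORTED and
 * dispatched by xfs_file_fallocate() above.  COLLAPSE_RANGE and
 * INSERT_RANGE require block-aligned offset/len; the 4096 constant is an
 * assumed block size.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static void fallocate_modes(int fd)
{
	/* preallocate 1MiB of space without changing i_size */
	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
	/* punch a hole over the second block, keeping the file size */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 4096);
	/* remove one block and shift the rest of the file down */
	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096);
	/* make room: shift everything from block 1 up by one block */
	fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 4096);
}
#endif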
 865STATIC int
 866xfs_file_clone_range(
 867	struct file	*file_in,
 868	loff_t		pos_in,
 869	struct file	*file_out,
 870	loff_t		pos_out,
 871	u64		len)
 872{
 873	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 874				     len, false);
 875}
 876
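/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): ->clone_file_range above is reached via the FICLONE and
 * FICLONERANGE ioctls, which is what "cp --reflink" uses.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>

static int reflink_file(int src_fd, int dst_fd)
{
	/* share all of src_fd's extents with dst_fd; writes then CoW */
	return ioctl(dst_fd, FICLONE, src_fd);
}
#endif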
 877STATIC ssize_t
 878xfs_file_dedupe_range(
 879	struct file	*src_file,
 880	u64		loff,
 881	u64		len,
 882	struct file	*dst_file,
 883	u64		dst_loff)
 884{
 885	int		error;
 886
 887	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
 888				     len, true);
 889	if (error)
 890		return error;
 891	return len;
 892}
 893
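/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): ->dedupe_file_range above is driven by the FIDEDUPERANGE
 * ioctl.  Unlike a clone, the kernel first verifies that both ranges
 * hold identical data before sharing the extents.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int dedupe_whole_range(int src_fd, int dst_fd, __u64 len)
{
	struct file_dedupe_range *r;
	int ret;

	r = calloc(1, sizeof(*r) + sizeof(struct file_dedupe_range_info));
	if (!r)
		return -1;
	r->src_length = len;		/* src_offset/dest_offset stay 0 */
	r->dest_count = 1;
	r->info[0].dest_fd = dst_fd;
	ret = ioctl(src_fd, FIDEDUPERANGE, r);
	/* r->info[0].status and bytes_deduped report the outcome */
	free(r);
	return ret;
}
#endif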
 894STATIC int
 895xfs_file_open(
 896	struct inode	*inode,
 897	struct file	*file)
 898{
 899	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
 900		return -EFBIG;
 901	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
 902		return -EIO;
 903	return 0;
 904}
 905
 906STATIC int
 907xfs_dir_open(
 908	struct inode	*inode,
 909	struct file	*file)
 910{
 911	struct xfs_inode *ip = XFS_I(inode);
 912	int		mode;
 913	int		error;
 914
 915	error = xfs_file_open(inode, file);
 916	if (error)
 917		return error;
 918
 919	/*
 920	 * If there are any blocks, read-ahead block 0 as we're almost
 921	 * certain to have the next operation be a read there.
 922	 */
 923	mode = xfs_ilock_data_map_shared(ip);
 924	if (ip->i_d.di_nextents > 0)
 925		error = xfs_dir3_data_readahead(ip, 0, -1);
 926	xfs_iunlock(ip, mode);
 927	return error;
 928}
 929
 930STATIC int
 931xfs_file_release(
 932	struct inode	*inode,
 933	struct file	*filp)
 934{
 935	return xfs_release(XFS_I(inode));
 936}
 937
 938STATIC int
 939xfs_file_readdir(
 940	struct file	*file,
 941	struct dir_context *ctx)
 942{
 943	struct inode	*inode = file_inode(file);
 944	xfs_inode_t	*ip = XFS_I(inode);
 945	size_t		bufsize;
 946
 947	/*
 948	 * The Linux API doesn't pass the total size of the buffer we
 949	 * read into down to the filesystem.  With the filldir concept
 950	 * it's not needed for correct information, but the XFS dir2 leaf
 951	 * code wants an estimate of the buffer size to calculate its
 952	 * readahead window and size the buffers used for mapping to
 953	 * physical blocks.
 954	 *
 955	 * Try to give it an estimate that's good enough, maybe at some
 956	 * point we can change the ->readdir prototype to include the
 957	 * buffer size.  For now we use the current glibc buffer size.
 958	 */
 959	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
 960
 961	return xfs_readdir(ip, ctx, bufsize);
 962}
 963
 964/*
 965 * This type indicates the kind of offset we would like to search
 966 * for in the page cache for xfs_seek_hole_data().
 967 */
 968enum {
 969	HOLE_OFF = 0,
 970	DATA_OFF,
 971};
 972
 973/*
 974 * Lookup the desired type of offset from the given page.
 975 *
 976 * On success, return true and the offset argument will point to the
 977 * start of the region that was found.  Otherwise this function will
 978 * return false and keep the offset argument unchanged.
 979 */
 980STATIC bool
 981xfs_lookup_buffer_offset(
 982	struct page		*page,
 983	loff_t			*offset,
 984	unsigned int		type)
 985{
 986	loff_t			lastoff = page_offset(page);
 987	bool			found = false;
 988	struct buffer_head	*bh, *head;
 989
 990	bh = head = page_buffers(page);
 991	do {
 992		/*
 993		 * Unwritten extents that have data in the page
 994		 * cache covering them can be identified by the
 995		 * BH_Unwritten state flag.  Pages with multiple
 996		 * buffers might have a mix of holes, data and
 997		 * unwritten extents - any buffer with valid
 998		 * data in it should have BH_Uptodate flag set
 999		 * on it.
1000		 */
1001		if (buffer_unwritten(bh) ||
1002		    buffer_uptodate(bh)) {
1003			if (type == DATA_OFF)
1004				found = true;
1005		} else {
1006			if (type == HOLE_OFF)
1007				found = true;
1008		}
1009
1010		if (found) {
1011			*offset = lastoff;
1012			break;
1013		}
1014		lastoff += bh->b_size;
1015	} while ((bh = bh->b_this_page) != head);
1016
1017	return found;
1018}
1019
1020/*
1021 * This routine is called to find and return a data or hole offset
1022 * from the page cache for unwritten extents according to the type
1023 * desired by xfs_seek_hole_data().
1024 *
1025 * The offset argument tells where to start searching the page cache.
1026 * The map argument is used to figure out the end of the range of
1027 * pages to look up.
1028 *
1029 * Return true if the desired type of offset was found, and the argument
1030 * offset is filled with that address.  Otherwise, return false and keep
1031 * offset unchanged.
1032 */
1033STATIC bool
1034xfs_find_get_desired_pgoff(
1035	struct inode		*inode,
1036	struct xfs_bmbt_irec	*map,
1037	unsigned int		type,
1038	loff_t			*offset)
1039{
1040	struct xfs_inode	*ip = XFS_I(inode);
1041	struct xfs_mount	*mp = ip->i_mount;
1042	struct pagevec		pvec;
1043	pgoff_t			index;
1044	pgoff_t			end;
1045	loff_t			endoff;
1046	loff_t			startoff = *offset;
1047	loff_t			lastoff = startoff;
1048	bool			found = false;
1049
1050	pagevec_init(&pvec, 0);
1051
1052	index = startoff >> PAGE_SHIFT;
1053	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1054	end = endoff >> PAGE_SHIFT;
1055	do {
1056		int		want;
1057		unsigned	nr_pages;
1058		unsigned int	i;
1059
1060		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
1061		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1062					  want);
1063		/*
1064		 * No page mapped into the given range.  If we are searching
1065		 * holes and this is the first time through the loop, the
1066		 * given offset landed in a hole, so return it.
1067		 *
1068		 * If we have already stepped through some block buffers to
1069		 * find holes but they all contained data, the last offset
1070		 * has already been updated to point to the end of the last
1071		 * mapped page; if it does not reach the end of the search
1072		 * range, there must be a hole between them.
1073		 */
1074		if (nr_pages == 0) {
1075			/* Data search found nothing */
1076			if (type == DATA_OFF)
1077				break;
1078
1079			ASSERT(type == HOLE_OFF);
1080			if (lastoff == startoff || lastoff < endoff) {
1081				found = true;
1082				*offset = lastoff;
1083			}
1084			break;
1085		}
1086
1087		/*
1088		 * At least one page was found.  If this is the first time
1089		 * through the loop and the first page's offset is greater
1090		 * than the given search offset, a hole was found.
1091		 */
1092		if (type == HOLE_OFF && lastoff == startoff &&
1093		    lastoff < page_offset(pvec.pages[0])) {
1094			found = true;
1095			break;
1096		}
1097
1098		for (i = 0; i < nr_pages; i++) {
1099			struct page	*page = pvec.pages[i];
1100			loff_t		b_offset;
1101
1102			/*
1103			 * At this point, the page may be truncated or
1104			 * invalidated (changing page->mapping to NULL),
1105			 * or even swizzled back from swapper_space to tmpfs
1106			 * file mapping. However, page->index will not change
1107			 * because we have a reference on the page.
1108			 *
1109			 * The search is done if the page index is out of range.
1110			 * If the current offset has not reached the end of
1111			 * the specified search range, there must be a hole
1112			 * between them.
1113			 */
1114			if (page->index > end) {
1115				if (type == HOLE_OFF && lastoff < endoff) {
1116					*offset = lastoff;
1117					found = true;
1118				}
1119				goto out;
1120			}
1121
1122			lock_page(page);
1123			/*
1124			 * Page truncated or invalidated (page->mapping == NULL).
1125			 * We can freely skip it and proceed to check the next
1126			 * page.
1127			 */
1128			if (unlikely(page->mapping != inode->i_mapping)) {
1129				unlock_page(page);
1130				continue;
1131			}
1132
1133			if (!page_has_buffers(page)) {
1134				unlock_page(page);
1135				continue;
1136			}
1137
1138			found = xfs_lookup_buffer_offset(page, &b_offset, type);
1139			if (found) {
1140				/*
1141				 * The found offset may be less than the
1142				 * start of the search range if this is the
1143				 * first pass through this code.
1144				 */
1145				*offset = max_t(loff_t, startoff, b_offset);
1146				unlock_page(page);
1147				goto out;
1148			}
1149
1150			/*
1151			 * We were either searching for data and found
1152			 * nothing, or searching for a hole and found a
1153			 * data buffer.  Either way, the next page probably
1154			 * contains what we want, so update the last offset.
1155			 */
1156			lastoff = page_offset(page) + PAGE_SIZE;
1157			unlock_page(page);
1158		}
1159
1160		/*
1161		 * Fewer pages were returned than we asked for, so the search
1162		 * is done.  Nothing was found when searching for data, but
1163		 * when searching for a hole there is one behind the last offset.
1164		 */
1165		if (nr_pages < want) {
1166			if (type == HOLE_OFF) {
1167				*offset = lastoff;
1168				found = true;
1169			}
1170			break;
1171		}
1172
1173		index = pvec.pages[i - 1]->index + 1;
1174		pagevec_release(&pvec);
1175	} while (index <= end);
1176
1177out:
1178	pagevec_release(&pvec);
1179	return found;
1180}
1181
1182/*
1183 * The caller must lock the inode with xfs_ilock_data_map_shared();
1184 * can we craft an appropriate ASSERT?
1185 *
1186 * An explicit end is passed in because the VFS-level lseek interface is
1187 * defined such that any offset past i_size shall return -ENXIO, but the
1188 * quota code does not maintain i_size and wants to SEEK_DATA past i_size.
1189 */
1190loff_t
1191__xfs_seek_hole_data(
1192	struct inode		*inode,
1193	loff_t			start,
1194	loff_t			end,
1195	int			whence)
1196{
1197	struct xfs_inode	*ip = XFS_I(inode);
1198	struct xfs_mount	*mp = ip->i_mount;
1199	loff_t			uninitialized_var(offset);
1200	xfs_fileoff_t		fsbno;
1201	xfs_filblks_t		lastbno;
1202	int			error;
1203
1204	if (start >= end) {
1205		error = -ENXIO;
1206		goto out_error;
1207	}
1208
1209	/*
1210	 * Try to read extents from the first block indicated
1211	 * by fsbno to the end block of the file.
1212	 */
1213	fsbno = XFS_B_TO_FSBT(mp, start);
1214	lastbno = XFS_B_TO_FSB(mp, end);
1215
1216	for (;;) {
1217		struct xfs_bmbt_irec	map[2];
1218		int			nmap = 2;
1219		unsigned int		i;
1220
1221		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
1222				       XFS_BMAPI_ENTIRE);
1223		if (error)
1224			goto out_error;
1225
1226		/* No extents at given offset, must be beyond EOF */
1227		if (nmap == 0) {
1228			error = -ENXIO;
1229			goto out_error;
1230		}
1231
1232		for (i = 0; i < nmap; i++) {
1233			offset = max_t(loff_t, start,
1234				       XFS_FSB_TO_B(mp, map[i].br_startoff));
1235
1236			/* Landed in the hole we wanted? */
1237			if (whence == SEEK_HOLE &&
1238			    map[i].br_startblock == HOLESTARTBLOCK)
1239				goto out;
1240
1241			/* Landed in the data extent we wanted? */
1242			if (whence == SEEK_DATA &&
1243			    (map[i].br_startblock == DELAYSTARTBLOCK ||
1244			     (map[i].br_state == XFS_EXT_NORM &&
1245			      !isnullstartblock(map[i].br_startblock))))
1246				goto out;
1247
1248			/*
1249			 * Landed in an unwritten extent, try to search
1250			 * for hole or data from page cache.
1251			 */
1252			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
1253				if (xfs_find_get_desired_pgoff(inode, &map[i],
1254				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
1255							&offset))
1256					goto out;
1257			}
1258		}
1259
1260		/*
1261		 * We only received one extent out of the two requested. This
1262		 * means we've hit EOF and didn't find what we are looking for.
1263		 */
1264		if (nmap == 1) {
1265			/*
1266			 * If we were looking for a hole, set offset to
1267			 * the end of the file (i.e., there is an implicit
1268			 * hole at the end of any file).
1269			 */
1270			if (whence == SEEK_HOLE) {
1271				offset = end;
1272				break;
1273			}
1274			/*
1275			 * If we were looking for data, it's nowhere to be found
1276			 */
1277			ASSERT(whence == SEEK_DATA);
1278			error = -ENXIO;
1279			goto out_error;
1280		}
1281
1282		ASSERT(i > 1);
1283
1284		/*
1285		 * Nothing was found, proceed to the next round of search
1286		 * if the next reading offset is not at or beyond EOF.
1287		 */
1288		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
1289		start = XFS_FSB_TO_B(mp, fsbno);
1290		if (start >= end) {
1291			if (whence == SEEK_HOLE) {
1292				offset = end;
1293				break;
1294			}
1295			ASSERT(whence == SEEK_DATA);
1296			error = -ENXIO;
1297			goto out_error;
1298		}
1299	}
1300
1301out:
1302	/*
1303	 * If at this point we have found the hole we wanted, the returned
1304	 * offset may be bigger than the file size, as it may be aligned to
1305	 * a page boundary for unwritten extents.  Clamp it back to the
1306	 * file size here.
1307	 */
1308	if (whence == SEEK_HOLE)
1309		offset = min_t(loff_t, offset, end);
1310
1311	return offset;
1312
1313out_error:
1314	return error;
1315}
1316
1317STATIC loff_t
1318xfs_seek_hole_data(
1319	struct file		*file,
1320	loff_t			start,
1321	int			whence)
1322{
1323	struct inode		*inode = file->f_mapping->host;
1324	struct xfs_inode	*ip = XFS_I(inode);
1325	struct xfs_mount	*mp = ip->i_mount;
1326	uint			lock;
1327	loff_t			offset, end;
1328	int			error = 0;
1329
1330	if (XFS_FORCED_SHUTDOWN(mp))
1331		return -EIO;
1332
1333	lock = xfs_ilock_data_map_shared(ip);
1334
1335	end = i_size_read(inode);
1336	offset = __xfs_seek_hole_data(inode, start, end, whence);
1337	if (offset < 0) {
1338		error = offset;
1339		goto out_unlock;
1340	}
1341
1342	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1343
1344out_unlock:
1345	xfs_iunlock(ip, lock);
1346
1347	if (error)
1348		return error;
1349	return offset;
1350}
1351
1352STATIC loff_t
1353xfs_file_llseek(
1354	struct file	*file,
1355	loff_t		offset,
1356	int		whence)
1357{
1358	switch (whence) {
1359	case SEEK_END:
1360	case SEEK_CUR:
1361	case SEEK_SET:
1362		return generic_file_llseek(file, offset, whence);
1363	case SEEK_HOLE:
1364	case SEEK_DATA:
1365		return xfs_seek_hole_data(file, offset, whence);
1366	default:
1367		return -EINVAL;
1368	}
1369}
1370
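/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): walking a sparse file with the SEEK_DATA/SEEK_HOLE support
 * implemented above.  lseek() fails with ENXIO once there is no more
 * data, and every file has an implicit hole at EOF.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void map_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		/* the implicit EOF hole guarantees this succeeds */
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: %lld..%lld\n", (long long)data, (long long)hole);
		data = hole;
	}
}
#endif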
1371/*
1372 * Locking for serialisation of IO during page faults. This results in a lock
1373 * ordering of:
1374 *
1375 * mmap_sem (MM)
1376 *   sb_start_pagefault(vfs, freeze)
1377 *     i_mmaplock (XFS - truncate serialisation)
1378 *       page_lock (MM)
1379 *         i_lock (XFS - extent map serialisation)
1380 */
1381
1382/*
1383 * An mmap()d file has taken a write protection fault and is being made
1384 * writable. We can set the page state up correctly for a writable page,
1385 * which means we can do correct delalloc accounting (ENOSPC checking!)
1386 * and unwritten extent mapping.
1387 */
1388STATIC int
1389xfs_filemap_page_mkwrite(
1390	struct vm_area_struct	*vma,
1391	struct vm_fault		*vmf)
1392{
1393	struct inode		*inode = file_inode(vma->vm_file);
1394	int			ret;
1395
1396	trace_xfs_filemap_page_mkwrite(XFS_I(inode));
1397
1398	sb_start_pagefault(inode->i_sb);
1399	file_update_time(vma->vm_file);
1400	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1401
1402	if (IS_DAX(inode)) {
1403		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
1404	} else {
1405		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
1406		ret = block_page_mkwrite_return(ret);
1407	}
1408
1409	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1410	sb_end_pagefault(inode->i_sb);
1411
1412	return ret;
1413}
1414
1415STATIC int
1416xfs_filemap_fault(
1417	struct vm_area_struct	*vma,
1418	struct vm_fault		*vmf)
1419{
1420	struct inode		*inode = file_inode(vma->vm_file);
1421	int			ret;
1422
1423	trace_xfs_filemap_fault(XFS_I(inode));
1424
1425	/* DAX can shortcut the normal fault path on write faults! */
1426	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1427		return xfs_filemap_page_mkwrite(vma, vmf);
1428
1429	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1430	if (IS_DAX(inode))
1431		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
1432	else
1433		ret = filemap_fault(vma, vmf);
1434	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1435
1436	return ret;
1437}
1438
1439/*
1440 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
1441 * both read and write faults, so we need to handle both cases.  There is
1442 * no ->pmd_mkwrite callout for huge pages, so we have a single function
1443 * here to handle them.  @flags carries the information on the type of
1444 * fault occurring.
1445 */
1446STATIC int
1447xfs_filemap_pmd_fault(
1448	struct vm_area_struct	*vma,
1449	unsigned long		addr,
1450	pmd_t			*pmd,
1451	unsigned int		flags)
1452{
1453	struct inode		*inode = file_inode(vma->vm_file);
1454	struct xfs_inode	*ip = XFS_I(inode);
1455	int			ret;
1456
1457	if (!IS_DAX(inode))
1458		return VM_FAULT_FALLBACK;
1459
1460	trace_xfs_filemap_pmd_fault(ip);
1461
1462	if (flags & FAULT_FLAG_WRITE) {
1463		sb_start_pagefault(inode->i_sb);
1464		file_update_time(vma->vm_file);
1465	}
1466
1467	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1468	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
1469	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1470
1471	if (flags & FAULT_FLAG_WRITE)
1472		sb_end_pagefault(inode->i_sb);
1473
1474	return ret;
1475}
1476
1477/*
1478 * pfn_mkwrite was originally intended to ensure we capture time stamp
1479 * updates on write faults. In reality, it's needed to serialise against
1480 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
1481 * to ensure we serialise the fault barrier in place.
1482 */
1483static int
1484xfs_filemap_pfn_mkwrite(
1485	struct vm_area_struct	*vma,
1486	struct vm_fault		*vmf)
1487{
1488
1489	struct inode		*inode = file_inode(vma->vm_file);
1490	struct xfs_inode	*ip = XFS_I(inode);
1491	int			ret = VM_FAULT_NOPAGE;
1492	loff_t			size;
1493
1494	trace_xfs_filemap_pfn_mkwrite(ip);
1495
1496	sb_start_pagefault(inode->i_sb);
1497	file_update_time(vma->vm_file);
1498
1499	/* make sure the faulting page hasn't raced with truncate */
1500	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1501	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1502	if (vmf->pgoff >= size)
1503		ret = VM_FAULT_SIGBUS;
1504	else if (IS_DAX(inode))
1505		ret = dax_pfn_mkwrite(vma, vmf);
1506	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1507	sb_end_pagefault(inode->i_sb);
1508	return ret;
1509
1510}
1511
1512static const struct vm_operations_struct xfs_file_vm_ops = {
1513	.fault		= xfs_filemap_fault,
1514	.pmd_fault	= xfs_filemap_pmd_fault,
1515	.map_pages	= filemap_map_pages,
1516	.page_mkwrite	= xfs_filemap_page_mkwrite,
1517	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1518};
1519
1520STATIC int
1521xfs_file_mmap(
1522	struct file	*filp,
1523	struct vm_area_struct *vma)
1524{
1525	file_accessed(filp);
1526	vma->vm_ops = &xfs_file_vm_ops;
1527	if (IS_DAX(file_inode(filp)))
1528		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1529	return 0;
1530}
1531
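/*
 * Illustrative userspace sketch (assumption, not part of the kernel
 * build): the first store through a shared writable mapping is what
 * drives the ->page_mkwrite (or DAX fault) paths above.
 */
#if 0
#include <sys/mman.h>

static void dirty_first_page(int fd, size_t len)
{
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p != MAP_FAILED) {
		/* write fault: takes i_mmaplock and marks the page dirty */
		p[0] = 1;
		munmap(p, len);
	}
}
#endif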
1532const struct file_operations xfs_file_operations = {
1533	.llseek		= xfs_file_llseek,
1534	.read_iter	= xfs_file_read_iter,
1535	.write_iter	= xfs_file_write_iter,
1536	.splice_read	= generic_file_splice_read,
1537	.splice_write	= iter_file_splice_write,
1538	.unlocked_ioctl	= xfs_file_ioctl,
1539#ifdef CONFIG_COMPAT
1540	.compat_ioctl	= xfs_file_compat_ioctl,
1541#endif
1542	.mmap		= xfs_file_mmap,
1543	.open		= xfs_file_open,
1544	.release	= xfs_file_release,
1545	.fsync		= xfs_file_fsync,
1546	.get_unmapped_area = thp_get_unmapped_area,
1547	.fallocate	= xfs_file_fallocate,
1548	.clone_file_range = xfs_file_clone_range,
1549	.dedupe_file_range = xfs_file_dedupe_range,
1550};
1551
1552const struct file_operations xfs_dir_file_operations = {
1553	.open		= xfs_dir_open,
1554	.read		= generic_read_dir,
1555	.iterate_shared	= xfs_file_readdir,
1556	.llseek		= generic_file_llseek,
1557	.unlocked_ioctl	= xfs_file_ioctl,
1558#ifdef CONFIG_COMPAT
1559	.compat_ioctl	= xfs_file_compat_ioctl,
1560#endif
1561	.fsync		= xfs_dir_fsync,
1562};