v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_trans.h"
  15#include "xfs_inode_item.h"
  16#include "xfs_bmap.h"
  17#include "xfs_bmap_util.h"
  18#include "xfs_dir2.h"
  19#include "xfs_dir2_priv.h"
  20#include "xfs_ioctl.h"
  21#include "xfs_trace.h"
  22#include "xfs_log.h"
  23#include "xfs_icache.h"
  24#include "xfs_pnfs.h"
  25#include "xfs_iomap.h"
  26#include "xfs_reflink.h"
  27
  28#include <linux/dax.h>
  29#include <linux/falloc.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mman.h>
  32#include <linux/fadvise.h>
  33#include <linux/mount.h>
  34
  35static const struct vm_operations_struct xfs_file_vm_ops;
  36
  37/*
  38 * Decide if the given file range is aligned to the size of the fundamental
  39 * allocation unit for the file.
  40 */
  41static bool
  42xfs_is_falloc_aligned(
  43	struct xfs_inode	*ip,
  44	loff_t			pos,
  45	long long int		len)
  46{
  47	struct xfs_mount	*mp = ip->i_mount;
  48	uint64_t		mask;
  49
  50	if (XFS_IS_REALTIME_INODE(ip)) {
  51		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
  52			u64	rextbytes;
  53			u32	mod;
  54
  55			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
  56			div_u64_rem(pos, rextbytes, &mod);
  57			if (mod)
  58				return false;
  59			div_u64_rem(len, rextbytes, &mod);
  60			return mod == 0;
  61		}
  62		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
  63	} else {
  64		mask = mp->m_sb.sb_blocksize - 1;
  65	}
  66
  67	return !((pos | len) & mask);
  68}
  69
  70/*
  71 * Fsync operations on directories are much simpler than on regular files,
  72 * as there is no file data to flush, and thus also no need for explicit
  73 * cache flush operations, and there are no non-transaction metadata updates
  74 * on directories either.
  75 */
  76STATIC int
  77xfs_dir_fsync(
  78	struct file		*file,
  79	loff_t			start,
  80	loff_t			end,
  81	int			datasync)
  82{
  83	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
  84
  85	trace_xfs_dir_fsync(ip);
  86	return xfs_log_force_inode(ip);
  87}
  88
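/*
 * Return the log checkpoint sequence the inode needs forced to stable
 * storage, or 0 if no log force is required for this fsync call.
 */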
  89static xfs_csn_t
  90xfs_fsync_seq(
  91	struct xfs_inode	*ip,
  92	bool			datasync)
  93{
  94	if (!xfs_ipincount(ip))
  95		return 0;
  96	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
  97		return 0;
  98	return ip->i_itemp->ili_commit_seq;
  99}
 100
 101/*
 102 * All metadata updates are logged, which means that we just have to flush the
 103 * log up to the latest LSN that touched the inode.
 104 *
 105 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 106 * the log force before we clear the ili_fsync_fields field. This ensures that
 107 * we don't get a racing sync operation that does not wait for the metadata to
 108 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 109 * then all that will happen is the log force will do nothing as the lsn will
 110 * already be on disk.  We can't race with setting ili_fsync_fields because that
 111 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 112 * shared until after the ili_fsync_fields is cleared.
 113 */
 114static int
 115xfs_fsync_flush_log(
 116	struct xfs_inode	*ip,
 117	bool			datasync,
 118	int			*log_flushed)
 119{
 120	int			error = 0;
 121	xfs_csn_t		seq;
 122
 123	xfs_ilock(ip, XFS_ILOCK_SHARED);
 124	seq = xfs_fsync_seq(ip, datasync);
 125	if (seq) {
 126		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
 127					  log_flushed);
 128
 129		spin_lock(&ip->i_itemp->ili_lock);
 130		ip->i_itemp->ili_fsync_fields = 0;
 131		spin_unlock(&ip->i_itemp->ili_lock);
 132	}
 133	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 134	return error;
 135}
 136
 137STATIC int
 138xfs_file_fsync(
 139	struct file		*file,
 140	loff_t			start,
 141	loff_t			end,
 142	int			datasync)
 143{
 144	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
 145	struct xfs_mount	*mp = ip->i_mount;
 146	int			error, err2;
 147	int			log_flushed = 0;
 148
 149	trace_xfs_file_fsync(ip);
 150
 151	error = file_write_and_wait_range(file, start, end);
 152	if (error)
 153		return error;
 154
 155	if (xfs_is_shutdown(mp))
 156		return -EIO;
 157
 158	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 159
 160	/*
 161	 * If we have an RT and/or log subvolume we need to make sure to flush
 162	 * the write cache of the device used for file data first.  This is to
 163	 * ensure newly written file data makes it to disk before logging the new
 164	 * inode size in case of an extending write.
 165	 */
 166	if (XFS_IS_REALTIME_INODE(ip))
 167		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
 168	else if (mp->m_logdev_targp != mp->m_ddev_targp)
 169		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 170
 171	/*
 172	 * Any inode that has dirty modifications in the log is pinned.  The
 173	 * racy check here for a pinned inode will not catch modifications
 174	 * that happen concurrently to the fsync call, but fsync semantics
 175	 * only require us to sync previously completed I/O.
 176	 */
 177	if (xfs_ipincount(ip)) {
 178		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
 179		if (err2 && !error)
 180			error = err2;
 181	}
 182
 183	/*
 184	 * If we only have a single device, and the log force above was
 185	 * a no-op, we might have to flush the data device cache here.
 186	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
 187	 * an already allocated file and thus do not have any metadata to
 188	 * commit.
 189	 */
 190	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
 191	    mp->m_logdev_targp == mp->m_ddev_targp) {
 192		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 193		if (err2 && !error)
 194			error = err2;
 195	}
 196
 197	return error;
 198}
 199
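/*
 * Take the requested inode lock, honouring IOCB_NOWAIT by trylocking and
 * returning -EAGAIN instead of sleeping.
 */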
 200static int
 201xfs_ilock_iocb(
 202	struct kiocb		*iocb,
 203	unsigned int		lock_mode)
 204{
 205	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 206
 207	if (iocb->ki_flags & IOCB_NOWAIT) {
 208		if (!xfs_ilock_nowait(ip, lock_mode))
 209			return -EAGAIN;
 210	} else {
 211		xfs_ilock(ip, lock_mode);
 212	}
 213
 214	return 0;
 215}
 216
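/*
 * Take the iolock for a write.  If a remap operation is in progress on the
 * inode, upgrade a shared lock to exclusive so the write waits for the
 * remap to complete.
 */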
 217static int
 218xfs_ilock_iocb_for_write(
 219	struct kiocb		*iocb,
 220	unsigned int		*lock_mode)
 221{
 222	ssize_t			ret;
 223	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 224
 225	ret = xfs_ilock_iocb(iocb, *lock_mode);
 226	if (ret)
 227		return ret;
 228
 229	if (*lock_mode == XFS_IOLOCK_EXCL)
 230		return 0;
 231	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
 232		return 0;
 233
 234	xfs_iunlock(ip, *lock_mode);
 235	*lock_mode = XFS_IOLOCK_EXCL;
 236	return xfs_ilock_iocb(iocb, *lock_mode);
 237}
 238
 239static unsigned int
 240xfs_ilock_for_write_fault(
 241	struct xfs_inode	*ip)
 242{
 243	/* get a shared lock if no remapping in progress */
 244	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
 245	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
 246		return XFS_MMAPLOCK_SHARED;
 247
 248	/* wait for remapping to complete */
 249	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
 250	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 251	return XFS_MMAPLOCK_EXCL;
 252}
 253
 254STATIC ssize_t
 255xfs_file_dio_read(
 256	struct kiocb		*iocb,
 257	struct iov_iter		*to)
 258{
 259	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 260	ssize_t			ret;
 261
 262	trace_xfs_file_direct_read(iocb, to);
 263
 264	if (!iov_iter_count(to))
 265		return 0; /* skip atime */
 266
 267	file_accessed(iocb->ki_filp);
 268
 269	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 270	if (ret)
 271		return ret;
 272	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
 273	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 274
 275	return ret;
 276}
 277
 278static noinline ssize_t
 279xfs_file_dax_read(
 280	struct kiocb		*iocb,
 281	struct iov_iter		*to)
 282{
 283	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 284	ssize_t			ret = 0;
 285
 286	trace_xfs_file_dax_read(iocb, to);
 287
 288	if (!iov_iter_count(to))
 289		return 0; /* skip atime */
 290
 291	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 292	if (ret)
 293		return ret;
 294	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
 295	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 296
 297	file_accessed(iocb->ki_filp);
 298	return ret;
 299}
 300
 301STATIC ssize_t
 302xfs_file_buffered_read(
 303	struct kiocb		*iocb,
 304	struct iov_iter		*to)
 305{
 306	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 307	ssize_t			ret;
 308
 309	trace_xfs_file_buffered_read(iocb, to);
 310
 311	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 312	if (ret)
 313		return ret;
 314	ret = generic_file_read_iter(iocb, to);
 315	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 316
 317	return ret;
 318}
 319
 320STATIC ssize_t
 321xfs_file_read_iter(
 322	struct kiocb		*iocb,
 323	struct iov_iter		*to)
 324{
 325	struct inode		*inode = file_inode(iocb->ki_filp);
 326	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
 327	ssize_t			ret = 0;
 328
 329	XFS_STATS_INC(mp, xs_read_calls);
 330
 331	if (xfs_is_shutdown(mp))
 332		return -EIO;
 333
 334	if (IS_DAX(inode))
 335		ret = xfs_file_dax_read(iocb, to);
 336	else if (iocb->ki_flags & IOCB_DIRECT)
 337		ret = xfs_file_dio_read(iocb, to);
 338	else
 339		ret = xfs_file_buffered_read(iocb, to);
 340
 341	if (ret > 0)
 342		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 343	return ret;
 344}
 345
 346STATIC ssize_t
 347xfs_file_splice_read(
 348	struct file		*in,
 349	loff_t			*ppos,
 350	struct pipe_inode_info	*pipe,
 351	size_t			len,
 352	unsigned int		flags)
 353{
 354	struct inode		*inode = file_inode(in);
 355	struct xfs_inode	*ip = XFS_I(inode);
 356	struct xfs_mount	*mp = ip->i_mount;
 357	ssize_t			ret = 0;
 358
 359	XFS_STATS_INC(mp, xs_read_calls);
 360
 361	if (xfs_is_shutdown(mp))
 362		return -EIO;
 363
 364	trace_xfs_file_splice_read(ip, *ppos, len);
 365
 366	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 367	ret = filemap_splice_read(in, ppos, pipe, len, flags);
 368	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 369	if (ret > 0)
 370		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 371	return ret;
 372}
 373
 374/*
 375 * Common pre-write limit and setup checks.
 376 *
 377 * Called with the iolock held either shared or exclusive according to
 378 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 379 * if called for a direct write beyond i_size.
 380 */
 381STATIC ssize_t
 382xfs_file_write_checks(
 383	struct kiocb		*iocb,
 384	struct iov_iter		*from,
 385	unsigned int		*iolock)
 386{
 387	struct file		*file = iocb->ki_filp;
 388	struct inode		*inode = file->f_mapping->host;
 389	struct xfs_inode	*ip = XFS_I(inode);
 390	ssize_t			error = 0;
 391	size_t			count = iov_iter_count(from);
 392	bool			drained_dio = false;
 393	loff_t			isize;
 394
 395restart:
 396	error = generic_write_checks(iocb, from);
 397	if (error <= 0)
 398		return error;
 399
 400	if (iocb->ki_flags & IOCB_NOWAIT) {
 401		error = break_layout(inode, false);
 402		if (error == -EWOULDBLOCK)
 403			error = -EAGAIN;
 404	} else {
 405		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
 406	}
 407
 408	if (error)
 409		return error;
 410
 411	/*
 412	 * For changing security info in file_remove_privs() we need i_rwsem
 413	 * exclusively.
 414	 */
 415	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 416		xfs_iunlock(ip, *iolock);
 417		*iolock = XFS_IOLOCK_EXCL;
 418		error = xfs_ilock_iocb(iocb, *iolock);
 419		if (error) {
 420			*iolock = 0;
 421			return error;
 422		}
 423		goto restart;
 424	}
 425
 426	/*
 427	 * If the offset is beyond the size of the file, we need to zero any
 428	 * blocks that fall between the existing EOF and the start of this
 429	 * write.  If zeroing is needed and we are currently holding the iolock
 430	 * shared, we need to update it to exclusive which implies having to
 431	 * redo all checks before.
 432	 *
 433	 * We need to serialise against EOF updates that occur in IO completions
 434	 * here. We want to make sure that nobody is changing the size while we
 435	 * do this check until we have placed an IO barrier (i.e.  hold the
 436	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
 437	 * spinlock effectively forms a memory barrier once we have the
 438	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
 439	 * hence be able to correctly determine if we need to run zeroing.
 440	 *
 441	 * We can do an unlocked check here safely as IO completion can only
 442	 * extend EOF. Truncate is locked out at this point, so the EOF can
 443	 * not move backwards, only forwards. Hence we only need to take the
 444	 * slow path and spin locks when we are at or beyond the current EOF.
 445	 */
 446	if (iocb->ki_pos <= i_size_read(inode))
 447		goto out;
 448
 449	spin_lock(&ip->i_flags_lock);
 450	isize = i_size_read(inode);
 451	if (iocb->ki_pos > isize) {
 452		spin_unlock(&ip->i_flags_lock);
 453
 454		if (iocb->ki_flags & IOCB_NOWAIT)
 455			return -EAGAIN;
 456
 457		if (!drained_dio) {
 458			if (*iolock == XFS_IOLOCK_SHARED) {
 459				xfs_iunlock(ip, *iolock);
 460				*iolock = XFS_IOLOCK_EXCL;
 461				xfs_ilock(ip, *iolock);
 462				iov_iter_reexpand(from, count);
 463			}
 464			/*
 465			 * We now have an IO submission barrier in place, but
 466			 * AIO can do EOF updates during IO completion and hence
 467			 * we now need to wait for all of them to drain. Non-AIO
 468			 * DIO will have drained before we are given the
 469			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
 470			 * no-op.
 471			 */
 472			inode_dio_wait(inode);
 473			drained_dio = true;
 474			goto restart;
 475		}
 476
 477		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
 478		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
 479		if (error)
 480			return error;
 481	} else
 482		spin_unlock(&ip->i_flags_lock);
 483
 484out:
 485	return kiocb_modified(iocb);
 486}
 487
 488static int
 489xfs_dio_write_end_io(
 490	struct kiocb		*iocb,
 491	ssize_t			size,
 492	int			error,
 493	unsigned		flags)
 494{
 495	struct inode		*inode = file_inode(iocb->ki_filp);
 496	struct xfs_inode	*ip = XFS_I(inode);
 497	loff_t			offset = iocb->ki_pos;
 498	unsigned int		nofs_flag;
 499
 500	trace_xfs_end_io_direct_write(ip, offset, size);
 501
 502	if (xfs_is_shutdown(ip->i_mount))
 503		return -EIO;
 504
 505	if (error)
 506		return error;
 507	if (!size)
 508		return 0;
 509
 510	/*
 511	 * Capture amount written on completion as we can't reliably account
 512	 * for it on submission.
 513	 */
 514	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
 515
 516	/*
 517	 * We can allocate memory here while doing writeback on behalf of
 518	 * memory reclaim.  To avoid memory allocation deadlocks set the
 519	 * task-wide nofs context for the following operations.
 520	 */
 521	nofs_flag = memalloc_nofs_save();
 522
 523	if (flags & IOMAP_DIO_COW) {
 524		error = xfs_reflink_end_cow(ip, offset, size);
 525		if (error)
 526			goto out;
 527	}
 528
 529	/*
 530	 * Unwritten conversion updates the in-core isize after extent
 531	 * conversion but before updating the on-disk size. Updating isize any
 532	 * earlier allows a racing dio read to find unwritten extents before
 533	 * they are converted.
 534	 */
 535	if (flags & IOMAP_DIO_UNWRITTEN) {
 536		error = xfs_iomap_write_unwritten(ip, offset, size, true);
 537		goto out;
 538	}
 539
 540	/*
 541	 * We need to update the in-core inode size here so that we don't end up
 542	 * with the on-disk inode size being outside the in-core inode size. We
 543	 * have no other method of updating EOF for AIO, so always do it here
 544	 * if necessary.
 545	 *
 546	 * We need to lock the test/set EOF update as we can be racing with
 547	 * other IO completions here to update the EOF. Failing to serialise
 548	 * here can result in EOF moving backwards and Bad Things Happen when
 549	 * that occurs.
 550	 *
 551	 * As IO completion only ever extends EOF, we can do an unlocked check
 552	 * here to avoid taking the spinlock. If we land within the current EOF,
 553	 * then we do not need to do an extending update at all, and we don't
 554	 * need to take the lock to check this. If we race with an update moving
 555	 * EOF, then we'll either still be beyond EOF and need to take the lock,
 556	 * or we'll be within EOF and we don't need to take it at all.
 557	 */
 558	if (offset + size <= i_size_read(inode))
 559		goto out;
 560
 561	spin_lock(&ip->i_flags_lock);
 562	if (offset + size > i_size_read(inode)) {
 563		i_size_write(inode, offset + size);
 564		spin_unlock(&ip->i_flags_lock);
 565		error = xfs_setfilesize(ip, offset, size);
 566	} else {
 567		spin_unlock(&ip->i_flags_lock);
 568	}
 569
 570out:
 571	memalloc_nofs_restore(nofs_flag);
 572	return error;
 573}
 574
 575static const struct iomap_dio_ops xfs_dio_write_ops = {
 576	.end_io		= xfs_dio_write_end_io,
 577};
 578
 579/*
 580 * Handle block aligned direct I/O writes
 581 */
 582static noinline ssize_t
 583xfs_file_dio_write_aligned(
 584	struct xfs_inode	*ip,
 585	struct kiocb		*iocb,
 586	struct iov_iter		*from)
 587{
 588	unsigned int		iolock = XFS_IOLOCK_SHARED;
 589	ssize_t			ret;
 590
 591	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 592	if (ret)
 593		return ret;
 594	ret = xfs_file_write_checks(iocb, from, &iolock);
 595	if (ret)
 596		goto out_unlock;
 597
 598	/*
 599	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
 600	 * the iolock back to shared if we had to take the exclusive lock in
 601	 * xfs_file_write_checks() for other reasons.
 602	 */
 603	if (iolock == XFS_IOLOCK_EXCL) {
 604		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 605		iolock = XFS_IOLOCK_SHARED;
 606	}
 607	trace_xfs_file_direct_write(iocb, from);
 608	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 609			   &xfs_dio_write_ops, 0, NULL, 0);
 610out_unlock:
 611	if (iolock)
 612		xfs_iunlock(ip, iolock);
 613	return ret;
 614}
 615
 616/*
 617 * Handle block unaligned direct I/O writes
 618 *
 619 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 620 * them to be done in parallel with reads and other direct I/O writes.  However,
 621 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 622 * to do sub-block zeroing and that requires serialisation against other direct
 623 * I/O to the same block.  In this case we need to serialise the submission of
 624 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 625 * In the case where sub-block zeroing is not required, we can do concurrent
 626 * sub-block dios to the same block successfully.
 627 *
 628 * Optimistically submit the I/O using the shared lock first, but use the
 629 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 630 * if block allocation or partial block zeroing would be required.  In that case
 631 * we try again with the exclusive lock.
 632 */
 633static noinline ssize_t
 634xfs_file_dio_write_unaligned(
 635	struct xfs_inode	*ip,
 636	struct kiocb		*iocb,
 637	struct iov_iter		*from)
 638{
 639	size_t			isize = i_size_read(VFS_I(ip));
 640	size_t			count = iov_iter_count(from);
 641	unsigned int		iolock = XFS_IOLOCK_SHARED;
 642	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
 643	ssize_t			ret;
 644
 645	/*
 646	 * Extending writes need exclusivity because of the sub-block zeroing
 647	 * that the DIO code always does for partial tail blocks beyond EOF, so
 648	 * don't even bother trying the fast path in this case.
 649	 */
 650	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
 651		if (iocb->ki_flags & IOCB_NOWAIT)
 652			return -EAGAIN;
 653retry_exclusive:
 654		iolock = XFS_IOLOCK_EXCL;
 655		flags = IOMAP_DIO_FORCE_WAIT;
 656	}
 657
 658	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 659	if (ret)
 660		return ret;
 661
 662	/*
 663	 * We can't properly handle unaligned direct I/O to reflink files yet,
 664	 * as we can't unshare a partial block.
 665	 */
 666	if (xfs_is_cow_inode(ip)) {
 667		trace_xfs_reflink_bounce_dio_write(iocb, from);
 668		ret = -ENOTBLK;
 669		goto out_unlock;
 670	}
 671
 672	ret = xfs_file_write_checks(iocb, from, &iolock);
 673	if (ret)
 674		goto out_unlock;
 675
 676	/*
 677	 * If we are doing exclusive unaligned I/O, this must be the only I/O
 678	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
 679	 * conversions from the AIO end_io handler.  Wait for all other I/O to
 680	 * drain first.
 681	 */
 682	if (flags & IOMAP_DIO_FORCE_WAIT)
 683		inode_dio_wait(VFS_I(ip));
 684
 685	trace_xfs_file_direct_write(iocb, from);
 686	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 687			   &xfs_dio_write_ops, flags, NULL, 0);
 688
 689	/*
 690	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
 691	 * layer rejected it for mapping or locking reasons. If we are doing
 692	 * nonblocking user I/O, propagate the error.
 693	 */
 694	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
 695		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
 696		xfs_iunlock(ip, iolock);
 697		goto retry_exclusive;
 698	}
 699
 700out_unlock:
 701	if (iolock)
 702		xfs_iunlock(ip, iolock);
 703	return ret;
 704}
 705
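/*
 * Direct I/O writes must be aligned to the device logical sector size;
 * dispatch to the aligned or unaligned path depending on whether the range
 * is also aligned to the filesystem block size.
 */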
 706static ssize_t
 707xfs_file_dio_write(
 708	struct kiocb		*iocb,
 709	struct iov_iter		*from)
 710{
 711	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 712	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
 713	size_t			count = iov_iter_count(from);
 714
 715	/* direct I/O must be aligned to device logical sector size */
 716	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 717		return -EINVAL;
 718	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
 719		return xfs_file_dio_write_unaligned(ip, iocb, from);
 720	return xfs_file_dio_write_aligned(ip, iocb, from);
 721}
 722
 723static noinline ssize_t
 724xfs_file_dax_write(
 725	struct kiocb		*iocb,
 726	struct iov_iter		*from)
 727{
 728	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 729	struct xfs_inode	*ip = XFS_I(inode);
 730	unsigned int		iolock = XFS_IOLOCK_EXCL;
 731	ssize_t			ret, error = 0;
 732	loff_t			pos;
 733
 734	ret = xfs_ilock_iocb(iocb, iolock);
 735	if (ret)
 736		return ret;
 737	ret = xfs_file_write_checks(iocb, from, &iolock);
 738	if (ret)
 739		goto out;
 740
 741	pos = iocb->ki_pos;
 742
 743	trace_xfs_file_dax_write(iocb, from);
 744	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
 745	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 746		i_size_write(inode, iocb->ki_pos);
 747		error = xfs_setfilesize(ip, pos, ret);
 748	}
 749out:
 750	if (iolock)
 751		xfs_iunlock(ip, iolock);
 752	if (error)
 753		return error;
 754
 755	if (ret > 0) {
 756		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 757
 758		/* Handle various SYNC-type writes */
 759		ret = generic_write_sync(iocb, ret);
 760	}
 761	return ret;
 762}
 763
 764STATIC ssize_t
 765xfs_file_buffered_write(
 766	struct kiocb		*iocb,
 767	struct iov_iter		*from)
 768{
 769	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 770	struct xfs_inode	*ip = XFS_I(inode);
 771	ssize_t			ret;
 772	bool			cleared_space = false;
 773	unsigned int		iolock;
 774
 775write_retry:
 776	iolock = XFS_IOLOCK_EXCL;
 777	ret = xfs_ilock_iocb(iocb, iolock);
 778	if (ret)
 779		return ret;
 780
 781	ret = xfs_file_write_checks(iocb, from, &iolock);
 782	if (ret)
 783		goto out;
 784
 785	trace_xfs_file_buffered_write(iocb, from);
 786	ret = iomap_file_buffered_write(iocb, from,
 787			&xfs_buffered_write_iomap_ops);
 788
 789	/*
 790	 * If we hit a space limit, try to free up some lingering preallocated
 791	 * space before returning an error. In the case of ENOSPC, first try to
 792	 * write back all dirty inodes to free up some of the excess reserved
 793	 * metadata space. This reduces the chances that the eofblocks scan
 794	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 795	 * also behaves as a filter to prevent too many eofblocks scans from
 796	 * running at the same time.  Use a synchronous scan to increase the
 797	 * effectiveness of the scan.
 798	 */
 799	if (ret == -EDQUOT && !cleared_space) {
 800		xfs_iunlock(ip, iolock);
 801		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 802		cleared_space = true;
 803		goto write_retry;
 804	} else if (ret == -ENOSPC && !cleared_space) {
 805		struct xfs_icwalk	icw = {0};
 806
 807		cleared_space = true;
 808		xfs_flush_inodes(ip->i_mount);
 809
 810		xfs_iunlock(ip, iolock);
 811		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
 812		xfs_blockgc_free_space(ip->i_mount, &icw);
 813		goto write_retry;
 814	}
 815
 816out:
 817	if (iolock)
 818		xfs_iunlock(ip, iolock);
 819
 820	if (ret > 0) {
 821		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 822		/* Handle various SYNC-type writes */
 823		ret = generic_write_sync(iocb, ret);
 824	}
 825	return ret;
 826}
 827
 828STATIC ssize_t
 829xfs_file_write_iter(
 830	struct kiocb		*iocb,
 831	struct iov_iter		*from)
 832{
 833	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 834	struct xfs_inode	*ip = XFS_I(inode);
 835	ssize_t			ret;
 836	size_t			ocount = iov_iter_count(from);
 837
 838	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 839
 840	if (ocount == 0)
 841		return 0;
 842
 843	if (xfs_is_shutdown(ip->i_mount))
 844		return -EIO;
 845
 846	if (IS_DAX(inode))
 847		return xfs_file_dax_write(iocb, from);
 848
 849	if (iocb->ki_flags & IOCB_DIRECT) {
 850		/*
 851		 * Allow a directio write to fall back to a buffered
 852		 * write *only* in the case that we're doing a reflink
 853		 * CoW.  In all other directio scenarios we do not
 854		 * allow an operation to fall back to buffered mode.
 855		 */
 856		ret = xfs_file_dio_write(iocb, from);
 857		if (ret != -ENOTBLK)
 858			return ret;
 859	}
 860
 861	return xfs_file_buffered_write(iocb, from);
 862}
 863
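/*
 * Drop and retake XFS_MMAPLOCK_EXCL around schedule() while waiting for a
 * busy DAX page to be released.
 */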
 864static void
 865xfs_wait_dax_page(
 866	struct inode		*inode)
 867{
 868	struct xfs_inode        *ip = XFS_I(inode);
 869
 870	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 871	schedule();
 872	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 873}
 874
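/*
 * Wait for any DAX page in the mapping whose reference count is still
 * elevated (e.g. by an outstanding DMA mapping) to be released.
 */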
 875int
 876xfs_break_dax_layouts(
 877	struct inode		*inode,
 878	bool			*retry)
 879{
 880	struct page		*page;
 881
 882	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
 883
 884	page = dax_layout_busy_page(inode->i_mapping);
 885	if (!page)
 886		return 0;
 887
 888	*retry = true;
 889	return ___wait_var_event(&page->_refcount,
 890			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
 891			0, 0, xfs_wait_dax_page(inode));
 892}
 893
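/*
 * Break leases and DAX page pins that conflict with the operation described
 * by @reason, cycling @iolock as needed, until no more retries are required.
 */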
 894int
 895xfs_break_layouts(
 896	struct inode		*inode,
 897	uint			*iolock,
 898	enum layout_break_reason reason)
 899{
 900	bool			retry;
 901	int			error;
 902
 903	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
 904
 905	do {
 906		retry = false;
 907		switch (reason) {
 908		case BREAK_UNMAP:
 909			error = xfs_break_dax_layouts(inode, &retry);
 910			if (error || retry)
 911				break;
 912			fallthrough;
 913		case BREAK_WRITE:
 914			error = xfs_break_leased_layouts(inode, iolock, &retry);
 915			break;
 916		default:
 917			WARN_ON_ONCE(1);
 918			error = -EINVAL;
 919		}
 920	} while (error == 0 && retry);
 921
 922	return error;
 923}
 924
 925/* Does this file, inode, or mount want synchronous writes? */
 926static inline bool xfs_file_sync_writes(struct file *filp)
 927{
 928	struct xfs_inode	*ip = XFS_I(file_inode(filp));
 929
 930	if (xfs_has_wsync(ip->i_mount))
 931		return true;
 932	if (filp->f_flags & (__O_SYNC | O_DSYNC))
 933		return true;
 934	if (IS_SYNC(file_inode(filp)))
 935		return true;
 936
 937	return false;
 938}
 939
 940#define	XFS_FALLOC_FL_SUPPORTED						\
 941		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 942		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
 943		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
 944
 945STATIC long
 946xfs_file_fallocate(
 947	struct file		*file,
 948	int			mode,
 949	loff_t			offset,
 950	loff_t			len)
 951{
 952	struct inode		*inode = file_inode(file);
 953	struct xfs_inode	*ip = XFS_I(inode);
 954	long			error;
 955	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
 956	loff_t			new_size = 0;
 957	bool			do_file_insert = false;
 958
 959	if (!S_ISREG(inode->i_mode))
 960		return -EINVAL;
 961	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
 962		return -EOPNOTSUPP;
 963
 964	xfs_ilock(ip, iolock);
 965	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
 966	if (error)
 967		goto out_unlock;
 968
 969	/*
 970	 * Must wait for all AIO to complete before we continue as AIO can
 971	 * change the file size on completion without holding any locks we
 972	 * currently hold. We must do this first because AIO can update both
 973	 * the on disk and in memory inode sizes, and the operations that follow
 974	 * require the in-memory size to be fully up-to-date.
 975	 */
 976	inode_dio_wait(inode);
 977
 978	/*
 979	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
 980	 * the cached range over the first operation we are about to run.
 981	 *
 982	 * We care about zero and collapse here because they both run a hole
 983	 * punch over the range first. Because that can zero data, and the range
 984	 * of invalidation for the shift operations is much larger, we still do
 985	 * the required flush for collapse in xfs_prepare_shift().
 986	 *
 987	 * Insert has the same range requirements as collapse, and we extend the
 988	 * file first which can zero data. Hence insert has the same
 989	 * flush/invalidate requirements as collapse and so they are both
 990	 * handled at the right time by xfs_prepare_shift().
 991	 */
 992	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
 993		    FALLOC_FL_COLLAPSE_RANGE)) {
 994		error = xfs_flush_unmap_range(ip, offset, len);
 995		if (error)
 996			goto out_unlock;
 997	}
 998
 999	error = file_modified(file);
1000	if (error)
1001		goto out_unlock;
1002
1003	if (mode & FALLOC_FL_PUNCH_HOLE) {
1004		error = xfs_free_file_space(ip, offset, len);
1005		if (error)
1006			goto out_unlock;
1007	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1008		if (!xfs_is_falloc_aligned(ip, offset, len)) {
1009			error = -EINVAL;
1010			goto out_unlock;
1011		}
1012
1013		/*
1014		 * There is no need to overlap collapse range with EOF,
1015		 * in which case it is effectively a truncate operation
1016		 */
1017		if (offset + len >= i_size_read(inode)) {
1018			error = -EINVAL;
1019			goto out_unlock;
1020		}
1021
1022		new_size = i_size_read(inode) - len;
1023
1024		error = xfs_collapse_file_space(ip, offset, len);
1025		if (error)
1026			goto out_unlock;
1027	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1028		loff_t		isize = i_size_read(inode);
1029
1030		if (!xfs_is_falloc_aligned(ip, offset, len)) {
1031			error = -EINVAL;
1032			goto out_unlock;
1033		}
1034
1035		/*
1036		 * New inode size must not exceed ->s_maxbytes, accounting for
1037		 * possible signed overflow.
1038		 */
1039		if (inode->i_sb->s_maxbytes - isize < len) {
1040			error = -EFBIG;
1041			goto out_unlock;
1042		}
1043		new_size = isize + len;
1044
1045		/* Offset should be less than i_size */
1046		if (offset >= isize) {
1047			error = -EINVAL;
1048			goto out_unlock;
1049		}
1050		do_file_insert = true;
1051	} else {
1052		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1053		    offset + len > i_size_read(inode)) {
1054			new_size = offset + len;
1055			error = inode_newsize_ok(inode, new_size);
1056			if (error)
1057				goto out_unlock;
1058		}
1059
1060		if (mode & FALLOC_FL_ZERO_RANGE) {
1061			/*
1062			 * Punch a hole and prealloc the range.  We use a hole
1063			 * punch rather than unwritten extent conversion for two
1064			 * reasons:
1065			 *
1066			 *   1.) Hole punch handles partial block zeroing for us.
1067			 *   2.) If prealloc returns ENOSPC, the file range is
1068			 *       still zero-valued by virtue of the hole punch.
1069			 */
1070			unsigned int blksize = i_blocksize(inode);
1071
1072			trace_xfs_zero_file_space(ip);
1073
1074			error = xfs_free_file_space(ip, offset, len);
1075			if (error)
1076				goto out_unlock;
1077
1078			len = round_up(offset + len, blksize) -
1079			      round_down(offset, blksize);
1080			offset = round_down(offset, blksize);
1081		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
1082			error = xfs_reflink_unshare(ip, offset, len);
1083			if (error)
1084				goto out_unlock;
1085		} else {
1086			/*
1087			 * In always_cow mode we can't use preallocations and
1088			 * thus should not create them.
1089			 */
1090			if (xfs_is_always_cow_inode(ip)) {
1091				error = -EOPNOTSUPP;
1092				goto out_unlock;
1093			}
1094		}
1095
1096		if (!xfs_is_always_cow_inode(ip)) {
1097			error = xfs_alloc_file_space(ip, offset, len);
1098			if (error)
1099				goto out_unlock;
1100		}
1101	}
1102
1103	/* Change file size if needed */
1104	if (new_size) {
1105		struct iattr iattr;
1106
1107		iattr.ia_valid = ATTR_SIZE;
1108		iattr.ia_size = new_size;
1109		error = xfs_vn_setattr_size(file_mnt_idmap(file),
1110					    file_dentry(file), &iattr);
1111		if (error)
1112			goto out_unlock;
1113	}
1114
1115	/*
1116	 * Perform hole insertion now that the file size has been
1117	 * updated so that if we crash during the operation we don't
1118	 * leave shifted extents past EOF and hence lose access to
1119	 * the data that is contained within them.
1120	 */
1121	if (do_file_insert) {
1122		error = xfs_insert_file_space(ip, offset, len);
1123		if (error)
1124			goto out_unlock;
1125	}
1126
1127	if (xfs_file_sync_writes(file))
1128		error = xfs_log_force_inode(ip);
1129
1130out_unlock:
1131	xfs_iunlock(ip, iolock);
1132	return error;
1133}
1134
1135STATIC int
1136xfs_file_fadvise(
1137	struct file	*file,
1138	loff_t		start,
1139	loff_t		end,
1140	int		advice)
1141{
1142	struct xfs_inode *ip = XFS_I(file_inode(file));
1143	int ret;
1144	int lockflags = 0;
1145
1146	/*
1147	 * Operations creating pages in page cache need protection from hole
1148	 * punching and similar ops
1149	 */
1150	if (advice == POSIX_FADV_WILLNEED) {
1151		lockflags = XFS_IOLOCK_SHARED;
1152		xfs_ilock(ip, lockflags);
1153	}
1154	ret = generic_fadvise(file, start, end, advice);
1155	if (lockflags)
1156		xfs_iunlock(ip, lockflags);
1157	return ret;
1158}
1159
1160STATIC loff_t
1161xfs_file_remap_range(
1162	struct file		*file_in,
1163	loff_t			pos_in,
1164	struct file		*file_out,
1165	loff_t			pos_out,
1166	loff_t			len,
1167	unsigned int		remap_flags)
1168{
1169	struct inode		*inode_in = file_inode(file_in);
1170	struct xfs_inode	*src = XFS_I(inode_in);
1171	struct inode		*inode_out = file_inode(file_out);
1172	struct xfs_inode	*dest = XFS_I(inode_out);
1173	struct xfs_mount	*mp = src->i_mount;
1174	loff_t			remapped = 0;
1175	xfs_extlen_t		cowextsize;
1176	int			ret;
1177
1178	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1179		return -EINVAL;
1180
1181	if (!xfs_has_reflink(mp))
1182		return -EOPNOTSUPP;
1183
1184	if (xfs_is_shutdown(mp))
1185		return -EIO;
1186
1187	/* Prepare and then clone file data. */
1188	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1189			&len, remap_flags);
1190	if (ret || len == 0)
1191		return ret;
1192
1193	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1194
1195	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1196			&remapped);
1197	if (ret)
1198		goto out_unlock;
1199
1200	/*
1201	 * Carry the cowextsize hint from src to dest if we're sharing the
1202	 * entire source file to the entire destination file, the source file
1203	 * has a cowextsize hint, and the destination file does not.
1204	 */
1205	cowextsize = 0;
1206	if (pos_in == 0 && len == i_size_read(inode_in) &&
1207	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1208	    pos_out == 0 && len >= i_size_read(inode_out) &&
1209	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1210		cowextsize = src->i_cowextsize;
1211
1212	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1213			remap_flags);
1214	if (ret)
1215		goto out_unlock;
1216
1217	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1218		xfs_log_force_inode(dest);
1219out_unlock:
1220	xfs_iunlock2_remapping(src, dest);
1221	if (ret)
1222		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1223	return remapped > 0 ? remapped : ret;
1224}
1225
1226STATIC int
1227xfs_file_open(
1228	struct inode	*inode,
1229	struct file	*file)
1230{
1231	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1232		return -EIO;
1233	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
1234			FMODE_DIO_PARALLEL_WRITE | FMODE_CAN_ODIRECT;
1235	return generic_file_open(inode, file);
1236}
1237
1238STATIC int
1239xfs_dir_open(
1240	struct inode	*inode,
1241	struct file	*file)
1242{
1243	struct xfs_inode *ip = XFS_I(inode);
1244	unsigned int	mode;
1245	int		error;
1246
1247	error = xfs_file_open(inode, file);
1248	if (error)
1249		return error;
1250
1251	/*
1252	 * If there are any blocks, read-ahead block 0 as we're almost
1253	 * certain to have the next operation be a read there.
1254	 */
1255	mode = xfs_ilock_data_map_shared(ip);
1256	if (ip->i_df.if_nextents > 0)
1257		error = xfs_dir3_data_readahead(ip, 0, 0);
1258	xfs_iunlock(ip, mode);
1259	return error;
1260}
1261
1262STATIC int
1263xfs_file_release(
1264	struct inode	*inode,
1265	struct file	*filp)
1266{
1267	return xfs_release(XFS_I(inode));
1268}
1269
1270STATIC int
1271xfs_file_readdir(
1272	struct file	*file,
1273	struct dir_context *ctx)
1274{
1275	struct inode	*inode = file_inode(file);
1276	xfs_inode_t	*ip = XFS_I(inode);
1277	size_t		bufsize;
1278
1279	/*
1280	 * The Linux API doesn't pass the total size of the buffer
1281	 * we read into down to the filesystem.  With the filldir concept
1282	 * it's not needed for correct information, but the XFS dir2 leaf
1283	 * code wants an estimate of the buffer size to calculate its
1284	 * readahead window and size the buffers used for mapping to
1285	 * physical blocks.
1286	 *
1287	 * Try to give it an estimate that's good enough, maybe at some
1288	 * point we can change the ->readdir prototype to include the
1289	 * buffer size.  For now we use the current glibc buffer size.
1290	 */
1291	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1292
1293	return xfs_readdir(NULL, ip, ctx, bufsize);
1294}
1295
1296STATIC loff_t
1297xfs_file_llseek(
1298	struct file	*file,
1299	loff_t		offset,
1300	int		whence)
1301{
1302	struct inode		*inode = file->f_mapping->host;
1303
1304	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1305		return -EIO;
1306
1307	switch (whence) {
1308	default:
1309		return generic_file_llseek(file, offset, whence);
1310	case SEEK_HOLE:
1311		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1312		break;
1313	case SEEK_DATA:
1314		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1315		break;
1316	}
1317
1318	if (offset < 0)
1319		return offset;
1320	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1321}
1322
1323#ifdef CONFIG_FS_DAX
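/*
 * Service a DAX fault via dax_iomap_fault(), using the write iomap ops only
 * for write faults that are not CoW (private mapping) faults.
 */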
1324static inline vm_fault_t
1325xfs_dax_fault(
1326	struct vm_fault		*vmf,
1327	unsigned int		order,
1328	bool			write_fault,
1329	pfn_t			*pfn)
1330{
1331	return dax_iomap_fault(vmf, order, pfn, NULL,
1332			(write_fault && !vmf->cow_page) ?
1333				&xfs_dax_write_iomap_ops :
1334				&xfs_read_iomap_ops);
1335}
1336#else
1337static inline vm_fault_t
1338xfs_dax_fault(
1339	struct vm_fault		*vmf,
1340	unsigned int		order,
1341	bool			write_fault,
1342	pfn_t			*pfn)
1343{
1344	ASSERT(0);
1345	return VM_FAULT_SIGBUS;
1346}
1347#endif
1348
1349/*
1350 * Locking for serialisation of IO during page faults. This results in a lock
1351 * ordering of:
1352 *
1353 * mmap_lock (MM)
1354 *   sb_start_pagefault(vfs, freeze)
1355 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1356 *       page_lock (MM)
1357 *         i_lock (XFS - extent map serialisation)
1358 */
1359static vm_fault_t
1360__xfs_filemap_fault(
1361	struct vm_fault		*vmf,
1362	unsigned int		order,
1363	bool			write_fault)
1364{
1365	struct inode		*inode = file_inode(vmf->vma->vm_file);
1366	struct xfs_inode	*ip = XFS_I(inode);
1367	vm_fault_t		ret;
1368	unsigned int		lock_mode = 0;
1369
1370	trace_xfs_filemap_fault(ip, order, write_fault);
1371
1372	if (write_fault) {
1373		sb_start_pagefault(inode->i_sb);
1374		file_update_time(vmf->vma->vm_file);
1375	}
1376
1377	if (IS_DAX(inode) || write_fault)
1378		lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
1379
1380	if (IS_DAX(inode)) {
1381		pfn_t pfn;
1382
1383		ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
1384		if (ret & VM_FAULT_NEEDDSYNC)
1385			ret = dax_finish_sync_fault(vmf, order, pfn);
1386	} else if (write_fault) {
1387		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
1388	} else {
1389		ret = filemap_fault(vmf);
1390	}
1391
1392	if (lock_mode)
1393		xfs_iunlock(XFS_I(inode), lock_mode);
1394
1395	if (write_fault)
1396		sb_end_pagefault(inode->i_sb);
1397	return ret;
1398}
1399
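/*
 * A fault only needs to be treated as a write fault if it is a write to a
 * shared mapping; writes to private mappings go through the COW path.
 */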
1400static inline bool
1401xfs_is_write_fault(
1402	struct vm_fault		*vmf)
1403{
1404	return (vmf->flags & FAULT_FLAG_WRITE) &&
1405	       (vmf->vma->vm_flags & VM_SHARED);
1406}
1407
1408static vm_fault_t
1409xfs_filemap_fault(
1410	struct vm_fault		*vmf)
1411{
1412	/* DAX can shortcut the normal fault path on write faults! */
1413	return __xfs_filemap_fault(vmf, 0,
1414			IS_DAX(file_inode(vmf->vma->vm_file)) &&
1415			xfs_is_write_fault(vmf));
1416}
1417
1418static vm_fault_t
1419xfs_filemap_huge_fault(
1420	struct vm_fault		*vmf,
1421	unsigned int		order)
1422{
1423	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1424		return VM_FAULT_FALLBACK;
1425
1426	/* DAX can shortcut the normal fault path on write faults! */
1427	return __xfs_filemap_fault(vmf, order,
1428			xfs_is_write_fault(vmf));
1429}
1430
1431static vm_fault_t
1432xfs_filemap_page_mkwrite(
1433	struct vm_fault		*vmf)
1434{
1435	return __xfs_filemap_fault(vmf, 0, true);
1436}
1437
1438/*
1439 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1440 * on write faults. In reality, it needs to serialise against truncate and
1441 * prepare memory for writing, so handle it as a standard write fault.
1442 */
1443static vm_fault_t
1444xfs_filemap_pfn_mkwrite(
1445	struct vm_fault		*vmf)
1446{
1447
1448	return __xfs_filemap_fault(vmf, 0, true);
1449}
1450
1451static const struct vm_operations_struct xfs_file_vm_ops = {
1452	.fault		= xfs_filemap_fault,
1453	.huge_fault	= xfs_filemap_huge_fault,
1454	.map_pages	= filemap_map_pages,
1455	.page_mkwrite	= xfs_filemap_page_mkwrite,
1456	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1457};
1458
1459STATIC int
1460xfs_file_mmap(
1461	struct file		*file,
1462	struct vm_area_struct	*vma)
1463{
1464	struct inode		*inode = file_inode(file);
1465	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1466
1467	/*
1468	 * We don't support synchronous mappings for non-DAX files and
1469	 * for DAX files if the underlying dax_device is not synchronous.
1470	 */
1471	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1472		return -EOPNOTSUPP;
1473
1474	file_accessed(file);
1475	vma->vm_ops = &xfs_file_vm_ops;
1476	if (IS_DAX(inode))
1477		vm_flags_set(vma, VM_HUGEPAGE);
1478	return 0;
1479}
1480
1481const struct file_operations xfs_file_operations = {
1482	.llseek		= xfs_file_llseek,
1483	.read_iter	= xfs_file_read_iter,
1484	.write_iter	= xfs_file_write_iter,
1485	.splice_read	= xfs_file_splice_read,
1486	.splice_write	= iter_file_splice_write,
1487	.iopoll		= iocb_bio_iopoll,
1488	.unlocked_ioctl	= xfs_file_ioctl,
1489#ifdef CONFIG_COMPAT
1490	.compat_ioctl	= xfs_file_compat_ioctl,
1491#endif
1492	.mmap		= xfs_file_mmap,
1493	.mmap_supported_flags = MAP_SYNC,
1494	.open		= xfs_file_open,
1495	.release	= xfs_file_release,
1496	.fsync		= xfs_file_fsync,
1497	.get_unmapped_area = thp_get_unmapped_area,
1498	.fallocate	= xfs_file_fallocate,
1499	.fadvise	= xfs_file_fadvise,
1500	.remap_file_range = xfs_file_remap_range,
1501};
1502
1503const struct file_operations xfs_dir_file_operations = {
1504	.open		= xfs_dir_open,
1505	.read		= generic_read_dir,
1506	.iterate_shared	= xfs_file_readdir,
1507	.llseek		= generic_file_llseek,
1508	.unlocked_ioctl	= xfs_file_ioctl,
1509#ifdef CONFIG_COMPAT
1510	.compat_ioctl	= xfs_file_compat_ioctl,
1511#endif
1512	.fsync		= xfs_dir_fsync,
1513};
v4.6
 
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_mount.h"
  25#include "xfs_da_format.h"
  26#include "xfs_da_btree.h"
  27#include "xfs_inode.h"
  28#include "xfs_trans.h"
  29#include "xfs_inode_item.h"
  30#include "xfs_bmap.h"
  31#include "xfs_bmap_util.h"
  32#include "xfs_error.h"
  33#include "xfs_dir2.h"
  34#include "xfs_dir2_priv.h"
  35#include "xfs_ioctl.h"
  36#include "xfs_trace.h"
  37#include "xfs_log.h"
  38#include "xfs_icache.h"
  39#include "xfs_pnfs.h"
 
 
  40
  41#include <linux/dcache.h>
  42#include <linux/falloc.h>
  43#include <linux/pagevec.h>
  44#include <linux/backing-dev.h>
 
 
 
  45
  46static const struct vm_operations_struct xfs_file_vm_ops;
  47
  48/*
  49 * Locking primitives for read and write IO paths to ensure we consistently use
  50 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
  51 */
  52static inline void
  53xfs_rw_ilock(
  54	struct xfs_inode	*ip,
  55	int			type)
 
  56{
  57	if (type & XFS_IOLOCK_EXCL)
  58		inode_lock(VFS_I(ip));
  59	xfs_ilock(ip, type);
  60}
  61
  62static inline void
  63xfs_rw_iunlock(
  64	struct xfs_inode	*ip,
  65	int			type)
  66{
  67	xfs_iunlock(ip, type);
  68	if (type & XFS_IOLOCK_EXCL)
  69		inode_unlock(VFS_I(ip));
  70}
  71
  72static inline void
  73xfs_rw_ilock_demote(
  74	struct xfs_inode	*ip,
  75	int			type)
  76{
  77	xfs_ilock_demote(ip, type);
  78	if (type & XFS_IOLOCK_EXCL)
  79		inode_unlock(VFS_I(ip));
  80}
  81
  82/*
  83 * xfs_iozero clears the specified range supplied via the page cache (except in
  84 * the DAX case). Writes through the page cache will allocate blocks over holes,
  85 * though the callers usually map the holes first and avoid them. If a block is
  86 * not completely zeroed, then it will be read from disk before being partially
  87 * zeroed.
  88 *
  89 * In the DAX case, we can just directly write to the underlying pages. This
  90 * will not allocate blocks, but will avoid holes and unwritten extents and so
  91 * not do unnecessary work.
  92 */
  93int
  94xfs_iozero(
  95	struct xfs_inode	*ip,	/* inode			*/
  96	loff_t			pos,	/* offset in file		*/
  97	size_t			count)	/* size of data to zero		*/
  98{
  99	struct page		*page;
 100	struct address_space	*mapping;
 101	int			status = 0;
 102
 103
 104	mapping = VFS_I(ip)->i_mapping;
 105	do {
 106		unsigned offset, bytes;
 107		void *fsdata;
 108
 109		offset = (pos & (PAGE_SIZE -1)); /* Within page */
 110		bytes = PAGE_SIZE - offset;
 111		if (bytes > count)
 112			bytes = count;
 113
 114		if (IS_DAX(VFS_I(ip))) {
 115			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
 116						     xfs_get_blocks_direct);
 117			if (status)
 118				break;
 119		} else {
 120			status = pagecache_write_begin(NULL, mapping, pos, bytes,
 121						AOP_FLAG_UNINTERRUPTIBLE,
 122						&page, &fsdata);
 123			if (status)
 124				break;
 125
 126			zero_user(page, offset, bytes);
 127
 128			status = pagecache_write_end(NULL, mapping, pos, bytes,
 129						bytes, page, fsdata);
 130			WARN_ON(status <= 0); /* can't return less than zero! */
 131			status = 0;
 
 
 
 
 
 
 
 132		}
 133		pos += bytes;
 134		count -= bytes;
 135	} while (count);
 136
 137	return status;
 138}
 139
 140int
 141xfs_update_prealloc_flags(
 142	struct xfs_inode	*ip,
 143	enum xfs_prealloc_flags	flags)
 144{
 145	struct xfs_trans	*tp;
 146	int			error;
 147
 148	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
 149	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
 150	if (error) {
 151		xfs_trans_cancel(tp);
 152		return error;
 153	}
 154
 155	xfs_ilock(ip, XFS_ILOCK_EXCL);
 156	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 157
 158	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
 159		VFS_I(ip)->i_mode &= ~S_ISUID;
 160		if (VFS_I(ip)->i_mode & S_IXGRP)
 161			VFS_I(ip)->i_mode &= ~S_ISGID;
 162		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 163	}
 164
 165	if (flags & XFS_PREALLOC_SET)
 166		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
 167	if (flags & XFS_PREALLOC_CLEAR)
 168		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
 169
 170	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 171	if (flags & XFS_PREALLOC_SYNC)
 172		xfs_trans_set_sync(tp);
 173	return xfs_trans_commit(tp);
 174}
 175
 176/*
 177 * Fsync operations on directories are much simpler than on regular files,
 178 * as there is no file data to flush, and thus also no need for explicit
 179 * cache flush operations, and there are no non-transaction metadata updates
 180 * on directories either.
 181 */
 182STATIC int
 183xfs_dir_fsync(
 184	struct file		*file,
 185	loff_t			start,
 186	loff_t			end,
 187	int			datasync)
 188{
 189	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
 190	struct xfs_mount	*mp = ip->i_mount;
 191	xfs_lsn_t		lsn = 0;
 192
 193	trace_xfs_dir_fsync(ip);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 194
 195	xfs_ilock(ip, XFS_ILOCK_SHARED);
 196	if (xfs_ipincount(ip))
 197		lsn = ip->i_itemp->ili_last_lsn;
 
 
 
 
 
 
 
 198	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 199
 200	if (!lsn)
 201		return 0;
 202	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 203}
 204
 205STATIC int
 206xfs_file_fsync(
 207	struct file		*file,
 208	loff_t			start,
 209	loff_t			end,
 210	int			datasync)
 211{
 212	struct inode		*inode = file->f_mapping->host;
 213	struct xfs_inode	*ip = XFS_I(inode);
 214	struct xfs_mount	*mp = ip->i_mount;
 215	int			error = 0;
 216	int			log_flushed = 0;
 217	xfs_lsn_t		lsn = 0;
 218
 219	trace_xfs_file_fsync(ip);
 220
 221	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 222	if (error)
 223		return error;
 224
 225	if (XFS_FORCED_SHUTDOWN(mp))
 226		return -EIO;
 227
 228	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 229
 230	if (mp->m_flags & XFS_MOUNT_BARRIER) {
 231		/*
 232		 * If we have an RT and/or log subvolume we need to make sure
 233		 * to flush the write cache the device used for file data
 234		 * first.  This is to ensure newly written file data make
 235		 * it to disk before logging the new inode size in case of
 236		 * an extending write.
 237		 */
 238		if (XFS_IS_REALTIME_INODE(ip))
 239			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
 240		else if (mp->m_logdev_targp != mp->m_ddev_targp)
 241			xfs_blkdev_issue_flush(mp->m_ddev_targp);
 242	}
 243
 244	/*
 245	 * All metadata updates are logged, which means that we just have to
 246	 * flush the log up to the latest LSN that touched the inode. If we have
 247	 * concurrent fsync/fdatasync() calls, we need them to all block on the
 248	 * log force before we clear the ili_fsync_fields field. This ensures
 249	 * that we don't get a racing sync operation that does not wait for the
 250	 * metadata to hit the journal before returning. If we race with
 251	 * clearing the ili_fsync_fields, then all that will happen is the log
 252	 * force will do nothing as the lsn will already be on disk. We can't
 253	 * race with setting ili_fsync_fields because that is done under
 254	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
 255	 * until after the ili_fsync_fields is cleared.
 256	 */
 257	xfs_ilock(ip, XFS_ILOCK_SHARED);
 258	if (xfs_ipincount(ip)) {
 259		if (!datasync ||
 260		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
 261			lsn = ip->i_itemp->ili_last_lsn;
 262	}
 263
 264	if (lsn) {
 265		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
 266		ip->i_itemp->ili_fsync_fields = 0;
 267	}
 268	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 269
 270	/*
 271	 * If we only have a single device, and the log force about was
 272	 * a no-op we might have to flush the data device cache here.
 273	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
 274	 * an already allocated file and thus do not have any metadata to
 275	 * commit.
 276	 */
 277	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
 278	    mp->m_logdev_targp == mp->m_ddev_targp &&
 279	    !XFS_IS_REALTIME_INODE(ip) &&
 280	    !log_flushed)
 281		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
 282
 283	return error;
 284}
 285
 286STATIC ssize_t
 287xfs_file_read_iter(
 288	struct kiocb		*iocb,
 289	struct iov_iter		*to)
 290{
 291	struct file		*file = iocb->ki_filp;
 292	struct inode		*inode = file->f_mapping->host;
 293	struct xfs_inode	*ip = XFS_I(inode);
 294	struct xfs_mount	*mp = ip->i_mount;
 295	size_t			size = iov_iter_count(to);
 296	ssize_t			ret = 0;
 297	int			ioflags = 0;
 298	xfs_fsize_t		n;
 299	loff_t			pos = iocb->ki_pos;
 300
 301	XFS_STATS_INC(mp, xs_read_calls);
 302
 303	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
 304		ioflags |= XFS_IO_ISDIRECT;
 305	if (file->f_mode & FMODE_NOCMTIME)
 306		ioflags |= XFS_IO_INVIS;
 307
 308	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
 309		xfs_buftarg_t	*target =
 310			XFS_IS_REALTIME_INODE(ip) ?
 311				mp->m_rtdev_targp : mp->m_ddev_targp;
 312		/* DIO must be aligned to device logical sector size */
 313		if ((pos | size) & target->bt_logical_sectormask) {
 314			if (pos == i_size_read(inode))
 315				return 0;
 316			return -EINVAL;
 317		}
 318	}
 319
 320	n = mp->m_super->s_maxbytes - pos;
 321	if (n <= 0 || size == 0)
 322		return 0;
 323
 324	if (n < size)
 325		size = n;
 
 
 
 
 
 326
 327	if (XFS_FORCED_SHUTDOWN(mp))
 328		return -EIO;
 
 329
 330	/*
 331	 * Locking is a bit tricky here. If we take an exclusive lock for direct
 332	 * IO, we effectively serialise all new concurrent read IO to this file
 333	 * and block it behind IO that is currently in progress because IO in
 334	 * progress holds the IO lock shared. We only need to hold the lock
 335	 * exclusive to blow away the page cache, so only take lock exclusively
 336	 * if the page cache needs invalidation. This allows the normal direct
 337	 * IO case of no page cache pages to proceed concurrently without
 338	 * serialisation.
 339	 */
 340	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 341	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
 342		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 343		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 344
 345		/*
 346		 * The generic dio code only flushes the range of the particular
 347		 * I/O. Because we take an exclusive lock here, this whole
 348		 * sequence is considerably more expensive for us. This has a
 349		 * noticeable performance impact for any file with cached pages,
 350		 * even when outside of the range of the particular I/O.
 351		 *
 352		 * Hence, amortize the cost of the lock against a full file
 353		 * flush and reduce the chances of repeated iolock cycles going
 354		 * forward.
 355		 */
 356		if (inode->i_mapping->nrpages) {
 357			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 358			if (ret) {
 359				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
 360				return ret;
 361			}
 362
 363			/*
 364			 * Invalidate whole pages. This can return an error if
 365			 * we fail to invalidate a page, but this should never
 366			 * happen on XFS. Warn if it does fail.
 367			 */
 368			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
 369			WARN_ON_ONCE(ret);
 370			ret = 0;
 371		}
 372		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 373	}
 374
 375	trace_xfs_file_read(ip, size, pos, ioflags);
 376
 377	ret = generic_file_read_iter(iocb, to);
 378	if (ret > 0)
 379		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 380
 381	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 382	return ret;
 383}
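/*
 * Editorial illustration (userspace, not part of this file): the sector
 * alignment rule enforced above.  An O_DIRECT read must have its file
 * offset and length aligned to the device logical sector size, and the
 * buffer must be suitably aligned as well.  The 512-byte sector size is
 * an assumption; real code should query the underlying device.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static ssize_t direct_read_aligned(const char *path, off_t off, size_t len)
{
	void *buf = NULL;
	ssize_t ret = -1;
	int fd;

	/* A misaligned offset or length would fail above with -EINVAL. */
	if ((off % 512) || (len % 512))
		return -1;

	fd = open(path, O_RDONLY | O_DIRECT);
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 512, len) == 0) {
		ret = pread(fd, buf, len, off);
		free(buf);
	}
	close(fd);
	return ret;
}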
 384
 385STATIC ssize_t
 386xfs_file_splice_read(
 387	struct file		*infilp,
 388	loff_t			*ppos,
 389	struct pipe_inode_info	*pipe,
 390	size_t			count,
 391	unsigned int		flags)
 392{
 393	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
 394	int			ioflags = 0;
 395	ssize_t			ret;
 396
 397	XFS_STATS_INC(ip->i_mount, xs_read_calls);
 398
 399	if (infilp->f_mode & FMODE_NOCMTIME)
 400		ioflags |= XFS_IO_INVIS;
 401
 402	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 403		return -EIO;
 404
 405	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
 
 
 
 
 406
 407	/*
 408	 * DAX inodes cannot use the page cache for splice, so we have to push
 409	 * them through the VFS IO path. This means it goes through
 410	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
 411	 * cannot lock the splice operation at this level for DAX inodes.
 412	 */
 413	if (IS_DAX(VFS_I(ip))) {
 414		ret = default_file_splice_read(infilp, ppos, pipe, count,
 415					       flags);
 416		goto out;
 417	}
 418
 419	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 420	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
 421	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 422out:
 423	if (ret > 0)
 424		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
 425	return ret;
 426}
 427
 428/*
 429 * This routine is called to handle zeroing any space in the last block of the
 430 * file that is beyond the EOF.  We do this since the size is being increased
 431 * without writing anything to that block and we don't want to read back
 432 * whatever garbage is on disk.
 433 */
 434STATIC int				/* error (positive) */
 435xfs_zero_last_block(
 436	struct xfs_inode	*ip,
 437	xfs_fsize_t		offset,
 438	xfs_fsize_t		isize,
 439	bool			*did_zeroing)
 440{
 441	struct xfs_mount	*mp = ip->i_mount;
 442	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
 443	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
 444	int			zero_len;
 445	int			nimaps = 1;
 446	int			error = 0;
 447	struct xfs_bmbt_irec	imap;
 448
 449	xfs_ilock(ip, XFS_ILOCK_EXCL);
 450	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
 451	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 452	if (error)
 453		return error;
 454
 455	ASSERT(nimaps > 0);
 
 456
 457	/*
 458	 * If the block underlying isize is just a hole, then there
 459	 * is nothing to zero.
 460	 */
 461	if (imap.br_startblock == HOLESTARTBLOCK)
 462		return 0;
 463
 464	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 465	if (isize + zero_len > offset)
 466		zero_len = offset - isize;
 467	*did_zeroing = true;
 468	return xfs_iozero(ip, isize, zero_len);
 469}
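/*
 * Editorial worked example of the arithmetic above, assuming a 4096-byte
 * block size: with isize = 6000 and a write starting at offset = 10000,
 * zero_offset = 6000 % 4096 = 1904 and zero_len = 4096 - 1904 = 2192, so
 * bytes 6000..8191 of the last block are zeroed.  If instead offset = 7000
 * (still inside that block), zero_len is clamped to offset - isize = 1000.
 */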
 470
 471/*
 472 * Zero any on disk space between the current EOF and the new, larger EOF.
 473 *
 474 * This handles the normal case of zeroing the remainder of the last block in
 475 * the file and the unusual case of zeroing blocks out beyond the size of the
 476 * file.  This second case only happens with fixed size extents and when the
 477 * system crashes before the inode size was updated but after blocks were
 478 * allocated.
 479 *
 480 * Expects the iolock to be held exclusive, and will take the ilock internally.
 481 */
 482int					/* error (positive) */
 483xfs_zero_eof(
 484	struct xfs_inode	*ip,
 485	xfs_off_t		offset,		/* starting I/O offset */
 486	xfs_fsize_t		isize,		/* current inode size */
 487	bool			*did_zeroing)
 488{
 489	struct xfs_mount	*mp = ip->i_mount;
 490	xfs_fileoff_t		start_zero_fsb;
 491	xfs_fileoff_t		end_zero_fsb;
 492	xfs_fileoff_t		zero_count_fsb;
 493	xfs_fileoff_t		last_fsb;
 494	xfs_fileoff_t		zero_off;
 495	xfs_fsize_t		zero_len;
 496	int			nimaps;
 497	int			error = 0;
 498	struct xfs_bmbt_irec	imap;
 499
 500	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 501	ASSERT(offset > isize);
 502
 503	trace_xfs_zero_eof(ip, isize, offset - isize);
 
 
 
 
 504
 505	/*
 506	 * First handle zeroing the block on which isize resides.
 507	 *
 508	 * We only zero a part of that block so it is handled specially.
 509	 */
 510	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
 511		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
 512		if (error)
 513			return error;
 514	}
 
 515
 516	/*
 517	 * Calculate the range between the new size and the old where blocks
 518	 * needing to be zeroed may exist.
 519	 *
 520	 * To get the block where the last byte in the file currently resides,
 521	 * we need to subtract one from the size and truncate back to a block
 522	 * boundary.  We subtract 1 in case the size is exactly on a block
 523	 * boundary.
 524	 */
 525	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
 526	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
 527	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
 528	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
 529	if (last_fsb == end_zero_fsb) {
 530		/*
 531		 * The size was only incremented on its last block.
 532		 * We took care of that above, so just return.
 533		 */
 534		return 0;
 535	}
 536
 537	ASSERT(start_zero_fsb <= end_zero_fsb);
 538	while (start_zero_fsb <= end_zero_fsb) {
 539		nimaps = 1;
 540		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
 541
 542		xfs_ilock(ip, XFS_ILOCK_EXCL);
 543		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
 544					  &imap, &nimaps, 0);
 545		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 546		if (error)
 547			return error;
 548
 549		ASSERT(nimaps > 0);
 
 
 
 
 
 550
 551		if (imap.br_state == XFS_EXT_UNWRITTEN ||
 552		    imap.br_startblock == HOLESTARTBLOCK) {
 553			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 554			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 555			continue;
 556		}
 557
 558		/*
 559		 * There are blocks we need to zero.
 560		 */
 561		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
 562		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
 
 
 
 
 
 
 
 563
 564		if ((zero_off + zero_len) > offset)
 565			zero_len = offset - zero_off;
 566
 567		error = xfs_iozero(ip, zero_off, zero_len);
 568		if (error)
 569			return error;
 570
 571		*did_zeroing = true;
 572		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 573		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 574	}
 575
 576	return 0;
 
 
 
 
 
 577}
 578
 579/*
 580 * Common pre-write limit and setup checks.
 581 *
 582 * Called with the iolock held either shared or exclusive according to
 583 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 584 * if called for a direct write beyond i_size.
 585 */
 586STATIC ssize_t
 587xfs_file_aio_write_checks(
 588	struct kiocb		*iocb,
 589	struct iov_iter		*from,
 590	int			*iolock)
 591{
 592	struct file		*file = iocb->ki_filp;
 593	struct inode		*inode = file->f_mapping->host;
 594	struct xfs_inode	*ip = XFS_I(inode);
 595	ssize_t			error = 0;
 596	size_t			count = iov_iter_count(from);
 597	bool			drained_dio = false;
 
 598
 599restart:
 600	error = generic_write_checks(iocb, from);
 601	if (error <= 0)
 602		return error;
 603
 604	error = xfs_break_layouts(inode, iolock, true);
 
 
 
 
 
 
 
 605	if (error)
 606		return error;
 607
 608	/* For changing security info in file_remove_privs() we need i_mutex */
 
 
 
 609	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 610		xfs_rw_iunlock(ip, *iolock);
 611		*iolock = XFS_IOLOCK_EXCL;
 612		xfs_rw_ilock(ip, *iolock);
 
 
 
 
 613		goto restart;
 614	}
 
 615	/*
 616	 * If the offset is beyond the size of the file, we need to zero any
 617	 * blocks that fall between the existing EOF and the start of this
 618	 * write.  If zeroing is needed and we are currently holding the
 619	 * iolock shared, we need to upgrade it to exclusive, which implies
 620	 * having to redo all the checks made so far.
 
 
 
 
 
 
 
 
 621	 *
 622	 * We need to serialise against EOF updates that occur in IO
 623	 * completions here. We want to make sure that nobody is changing the
 624	 * size while we do this check until we have placed an IO barrier (i.e.
 625	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
 626	 * The spinlock effectively forms a memory barrier once we have the
 627	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
 628	 * and hence be able to correctly determine if we need to run zeroing.
 629	 */
 
 
 
 630	spin_lock(&ip->i_flags_lock);
 631	if (iocb->ki_pos > i_size_read(inode)) {
 632		bool	zero = false;
 
 
 
 
 633
 634		spin_unlock(&ip->i_flags_lock);
 635		if (!drained_dio) {
 636			if (*iolock == XFS_IOLOCK_SHARED) {
 637				xfs_rw_iunlock(ip, *iolock);
 638				*iolock = XFS_IOLOCK_EXCL;
 639				xfs_rw_ilock(ip, *iolock);
 640				iov_iter_reexpand(from, count);
 641			}
 642			/*
 643			 * We now have an IO submission barrier in place, but
 644			 * AIO can do EOF updates during IO completion and hence
 645			 * we now need to wait for all of them to drain. Non-AIO
 646			 * DIO will have drained before we are given the
 647			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
 648			 * no-op.
 649			 */
 650			inode_dio_wait(inode);
 651			drained_dio = true;
 652			goto restart;
 653		}
 654		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
 
 
 655		if (error)
 656			return error;
 657	} else
 658		spin_unlock(&ip->i_flags_lock);
 659
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 660	/*
 661	 * Updating the timestamps will grab the ilock again from
 662	 * xfs_fs_dirty_inode, so we have to call it after dropping the
 663	 * lock above.  Eventually we should look into a way to avoid
 664	 * the pointless lock roundtrip.
 665	 */
 666	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
 667		error = file_update_time(file);
 
 
 668		if (error)
 669			return error;
 
 
 
 
 
 
 
 
 
 
 
 670	}
 671
 672	/*
 673	 * If we're writing the file then make sure to clear the setuid and
 674	 * setgid bits if the process is not being run by root.  This keeps
 675	 * people from modifying setuid and setgid binaries.
 
 
 
 
 
 
 
 
 
 
 
 
 
 676	 */
 677	if (!IS_NOSEC(inode))
 678		return file_remove_privs(file);
 679	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 680}
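/*
 * Editorial illustration (userspace, not part of this file): the EOF
 * zeroing performed above guarantees that extending a file by writing
 * past its current size never exposes stale on-disk data; the gap between
 * the old EOF and the new write offset reads back as zeroes.  The path,
 * mode and offsets are arbitrary assumptions.
 */
#include <fcntl.h>
#include <unistd.h>

static int write_past_eof(const char *path)
{
	char byte = 'x';
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return -1;
	pwrite(fd, &byte, 1, 0);	/* i_size becomes 1 */
	/* Jump far past EOF: bytes 1..99999 must subsequently read as 0. */
	pwrite(fd, &byte, 1, 100000);
	return close(fd);
}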
 681
 
 
 
 
 682/*
 683 * xfs_file_dio_aio_write - handle direct IO writes
 684 *
 685 * Lock the inode appropriately to prepare for and issue a direct IO write.
 686 * By separating it from the buffered write path we remove all the
 687 * tricky-to-follow locking changes and looping.
 688 *
 689 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 690 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 691 * pages are flushed out.
 692 *
 693 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 694 * allowing them to be done in parallel with reads and other direct IO writes.
 695 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 696 * needs to do sub-block zeroing and that requires serialisation against other
 697 * direct IOs to the same block. In this case we need to serialise the
 698 * submission of the unaligned IOs so that we don't get racing block zeroing in
 699 * the dio layer.  To avoid the problem with aio, we also need to wait for
 700 * outstanding IOs to complete so that unwritten extent conversion is completed
 701 * before we try to map the overlapping block. This is currently implemented by
 702 * hitting it with a big hammer (i.e. inode_dio_wait()).
 703 *
 704 * Returns with locks held indicated by @iolock and errors indicated by
 705 * negative return values.
 706 */
 707STATIC ssize_t
 708xfs_file_dio_aio_write(
 
 709	struct kiocb		*iocb,
 710	struct iov_iter		*from)
 711{
 712	struct file		*file = iocb->ki_filp;
 713	struct address_space	*mapping = file->f_mapping;
 714	struct inode		*inode = mapping->host;
 715	struct xfs_inode	*ip = XFS_I(inode);
 716	struct xfs_mount	*mp = ip->i_mount;
 717	ssize_t			ret = 0;
 718	int			unaligned_io = 0;
 719	int			iolock;
 720	size_t			count = iov_iter_count(from);
 721	loff_t			pos = iocb->ki_pos;
 722	loff_t			end;
 723	struct iov_iter		data;
 724	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
 725					mp->m_rtdev_targp : mp->m_ddev_targp;
 726
 727	/* DIO must be aligned to device logical sector size */
 728	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
 729		return -EINVAL;
 730
 731	/* "unaligned" here means not aligned to a filesystem block */
 732	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
 733		unaligned_io = 1;
 734
 735	/*
 736	 * We don't need to take an exclusive lock unless the page cache needs
 737	 * to be invalidated or unaligned IO is being executed. We don't need to
 738	 * consider the EOF extension case here because
 739	 * xfs_file_aio_write_checks() will relock the inode as necessary for
 740	 * EOF zeroing cases and fill out the new inode size as appropriate.
 741	 */
 742	if (unaligned_io || mapping->nrpages)
 743		iolock = XFS_IOLOCK_EXCL;
 744	else
 745		iolock = XFS_IOLOCK_SHARED;
 746	xfs_rw_ilock(ip, iolock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 747
 748	/*
 749	 * Recheck if there are cached pages that need invalidate after we got
 750	 * the iolock to protect against other threads adding new pages while
 751	 * we were waiting for the iolock.
 752	 */
 753	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
 754		xfs_rw_iunlock(ip, iolock);
 
 
 755		iolock = XFS_IOLOCK_EXCL;
 756		xfs_rw_ilock(ip, iolock);
 757	}
 758
 759	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 760	if (ret)
 761		goto out;
 762	count = iov_iter_count(from);
 763	pos = iocb->ki_pos;
 764	end = pos + count - 1;
 765
 766	/*
 767	 * See xfs_file_read_iter() for why we do a full-file flush here.
 
 768	 */
 769	if (mapping->nrpages) {
 770		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 771		if (ret)
 772			goto out;
 773		/*
 774		 * Invalidate whole pages. This can return an error if we fail
 775		 * to invalidate a page, but this should never happen on XFS.
 776		 * Warn if it does fail.
 777		 */
 778		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
 779		WARN_ON_ONCE(ret);
 780		ret = 0;
 781	}
 782
 
 
 
 
 783	/*
 784	 * If we are doing unaligned IO, wait for all other IO to drain;
 785	 * otherwise, demote the lock if we had to flush cached pages.
 786	 */
 787	if (unaligned_io)
 788		inode_dio_wait(inode);
 789	else if (iolock == XFS_IOLOCK_EXCL) {
 790		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 791		iolock = XFS_IOLOCK_SHARED;
 
 
 
 
 
 
 
 
 
 
 
 
 
 792	}
 793
 794	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 795
 796	data = *from;
 797	ret = mapping->a_ops->direct_IO(iocb, &data, pos);
 
 
 
 
 
 798
 799	/* see generic_file_direct_write() for why this is necessary */
 800	if (mapping->nrpages) {
 801		invalidate_inode_pages2_range(mapping,
 802					      pos >> PAGE_SHIFT,
 803					      end >> PAGE_SHIFT);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 804	}
 
 
 
 
 
 805
 806	if (ret > 0) {
 807		pos += ret;
 808		iov_iter_advance(from, ret);
 809		iocb->ki_pos = pos;
 
 810	}
 811out:
 812	xfs_rw_iunlock(ip, iolock);
 813
 814	/*
 815	 * No fallback to buffered IO on errors for XFS. DAX can result in
 816	 * partial writes, but direct IO will either complete fully or fail.
 817	 */
 818	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
 819	return ret;
 820}
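/*
 * Editorial sketch (not part of this file) of the "unaligned" classification
 * used above: an IO is sub-block unaligned if either its start or its end
 * does not fall on a filesystem block boundary.  blockmask is assumed to be
 * blocksize - 1 for a power-of-two block size, mirroring mp->m_blockmask.
 */
static inline bool dio_is_fsb_unaligned(loff_t pos, size_t count,
					unsigned int blockmask)
{
	return (pos & blockmask) || ((pos + count) & blockmask);
}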
 821
 822STATIC ssize_t
 823xfs_file_buffered_aio_write(
 824	struct kiocb		*iocb,
 825	struct iov_iter		*from)
 826{
 827	struct file		*file = iocb->ki_filp;
 828	struct address_space	*mapping = file->f_mapping;
 829	struct inode		*inode = mapping->host;
 830	struct xfs_inode	*ip = XFS_I(inode);
 831	ssize_t			ret;
 832	int			enospc = 0;
 833	int			iolock = XFS_IOLOCK_EXCL;
 834
 835	xfs_rw_ilock(ip, iolock);
 
 
 
 
 836
 837	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 838	if (ret)
 839		goto out;
 840
 841	/* We can write back this queue in page reclaim */
 842	current->backing_dev_info = inode_to_bdi(inode);
 843
 844write_retry:
 845	trace_xfs_file_buffered_write(ip, iov_iter_count(from),
 846				      iocb->ki_pos, 0);
 847	ret = generic_perform_write(file, from, iocb->ki_pos);
 848	if (likely(ret >= 0))
 849		iocb->ki_pos += ret;
 850
 851	/*
 852	 * If we hit a space limit, try to free up some lingering preallocated
 853	 * space before returning an error. In the case of ENOSPC, first try to
 854	 * write back all dirty inodes to free up some of the excess reserved
 855	 * metadata space. This reduces the chances that the eofblocks scan
 856	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 857	 * also behaves as a filter to prevent too many eofblocks scans from
 858	 * running at the same time.
 
 859	 */
 860	if (ret == -EDQUOT && !enospc) {
 861		enospc = xfs_inode_free_quota_eofblocks(ip);
 862		if (enospc)
 863			goto write_retry;
 864	} else if (ret == -ENOSPC && !enospc) {
 865		struct xfs_eofblocks eofb = {0};
 
 866
 867		enospc = 1;
 868		xfs_flush_inodes(ip->i_mount);
 869		eofb.eof_scan_owner = ip->i_ino; /* for locking */
 870		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 871		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
 
 872		goto write_retry;
 873	}
 874
 875	current->backing_dev_info = NULL;
 876out:
 877	xfs_rw_iunlock(ip, iolock);
 
 
 
 
 
 
 
 878	return ret;
 879}
 880
 881STATIC ssize_t
 882xfs_file_write_iter(
 883	struct kiocb		*iocb,
 884	struct iov_iter		*from)
 885{
 886	struct file		*file = iocb->ki_filp;
 887	struct address_space	*mapping = file->f_mapping;
 888	struct inode		*inode = mapping->host;
 889	struct xfs_inode	*ip = XFS_I(inode);
 890	ssize_t			ret;
 891	size_t			ocount = iov_iter_count(from);
 892
 893	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 894
 895	if (ocount == 0)
 896		return 0;
 897
 898	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 899		return -EIO;
 900
 901	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
 902		ret = xfs_file_dio_aio_write(iocb, from);
 903	else
 904		ret = xfs_file_buffered_aio_write(iocb, from);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 905
 906	if (ret > 0) {
 907		ssize_t err;
 
 
 908
 909		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 
 
 
 
 
 910
 911		/* Handle various SYNC-type writes */
 912		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 913		if (err < 0)
 914			ret = err;
 915	}
 916	return ret;
 917}
 918
 919#define	XFS_FALLOC_FL_SUPPORTED						\
 920		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 921		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
 922		 FALLOC_FL_INSERT_RANGE)
 923
 924STATIC long
 925xfs_file_fallocate(
 926	struct file		*file,
 927	int			mode,
 928	loff_t			offset,
 929	loff_t			len)
 930{
 931	struct inode		*inode = file_inode(file);
 932	struct xfs_inode	*ip = XFS_I(inode);
 933	long			error;
 934	enum xfs_prealloc_flags	flags = 0;
 935	uint			iolock = XFS_IOLOCK_EXCL;
 936	loff_t			new_size = 0;
 937	bool			do_file_insert = false;
 938
 939	if (!S_ISREG(inode->i_mode))
 940		return -EINVAL;
 941	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
 942		return -EOPNOTSUPP;
 943
 944	xfs_ilock(ip, iolock);
 945	error = xfs_break_layouts(inode, &iolock, false);
 946	if (error)
 947		goto out_unlock;
 948
 949	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 950	iolock |= XFS_MMAPLOCK_EXCL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 951
 952	if (mode & FALLOC_FL_PUNCH_HOLE) {
 953		error = xfs_free_file_space(ip, offset, len);
 954		if (error)
 955			goto out_unlock;
 956	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
 957		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 958
 959		if (offset & blksize_mask || len & blksize_mask) {
 960			error = -EINVAL;
 961			goto out_unlock;
 962		}
 963
 964		/*
 965		 * There is no need for a collapse range to overlap EOF;
 966		 * in that case it is effectively a truncate operation.
 967		 */
 968		if (offset + len >= i_size_read(inode)) {
 969			error = -EINVAL;
 970			goto out_unlock;
 971		}
 972
 973		new_size = i_size_read(inode) - len;
 974
 975		error = xfs_collapse_file_space(ip, offset, len);
 976		if (error)
 977			goto out_unlock;
 978	} else if (mode & FALLOC_FL_INSERT_RANGE) {
 979		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 980
 981		new_size = i_size_read(inode) + len;
 982		if (offset & blksize_mask || len & blksize_mask) {
 983			error = -EINVAL;
 984			goto out_unlock;
 985		}
 986
 987		/* check the new inode size does not wrap through zero */
 988		if (new_size > inode->i_sb->s_maxbytes) {
 
 
 
 989			error = -EFBIG;
 990			goto out_unlock;
 991		}
 
 992
 993		/* Offset should be less than i_size */
 994		if (offset >= i_size_read(inode)) {
 995			error = -EINVAL;
 996			goto out_unlock;
 997		}
 998		do_file_insert = true;
 999	} else {
1000		flags |= XFS_PREALLOC_SET;
1001
1002		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1003		    offset + len > i_size_read(inode)) {
1004			new_size = offset + len;
1005			error = inode_newsize_ok(inode, new_size);
1006			if (error)
1007				goto out_unlock;
1008		}
1009
1010		if (mode & FALLOC_FL_ZERO_RANGE)
1011			error = xfs_zero_file_space(ip, offset, len);
1012		else
1013			error = xfs_alloc_file_space(ip, offset, len,
1014						     XFS_BMAPI_PREALLOC);
1015		if (error)
1016			goto out_unlock;
1017	}
 
 
 
 
 
 
 
 
 
1018
1019	if (file->f_flags & O_DSYNC)
1020		flags |= XFS_PREALLOC_SYNC;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1021
1022	error = xfs_update_prealloc_flags(ip, flags);
1023	if (error)
1024		goto out_unlock;
 
 
 
1025
1026	/* Change file size if needed */
1027	if (new_size) {
1028		struct iattr iattr;
1029
1030		iattr.ia_valid = ATTR_SIZE;
1031		iattr.ia_size = new_size;
1032		error = xfs_setattr_size(ip, &iattr);
 
1033		if (error)
1034			goto out_unlock;
1035	}
1036
1037	/*
1038	 * Perform hole insertion now that the file size has been
1039	 * updated so that if we crash during the operation we don't
1040	 * leave shifted extents past EOF and hence losing access to
1041	 * the data that is contained within them.
1042	 */
1043	if (do_file_insert)
1044		error = xfs_insert_file_space(ip, offset, len);
 
 
 
 
 
 
1045
1046out_unlock:
1047	xfs_iunlock(ip, iolock);
1048	return error;
1049}
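/*
 * Editorial illustration (userspace, not part of this file): exercising two
 * of the fallocate() modes handled above.  PUNCH_HOLE must be combined with
 * KEEP_SIZE, and both modes require block-aligned offset and length as
 * checked above.  The 4096 alignment assumes a 4k block size, and fd is
 * assumed to refer to a regular file larger than 8k.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int punch_then_collapse(int fd)
{
	/* Punch a 4k hole at offset 4k: space is freed, file size unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 4096))
		return -1;
	/* Collapse the same range: later data shifts down, file size shrinks. */
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096);
}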
1050
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1051
1052STATIC int
1053xfs_file_open(
1054	struct inode	*inode,
1055	struct file	*file)
1056{
1057	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1058		return -EFBIG;
1059	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
1060		return -EIO;
1061	return 0;
 
 
1062}
1063
1064STATIC int
1065xfs_dir_open(
1066	struct inode	*inode,
1067	struct file	*file)
1068{
1069	struct xfs_inode *ip = XFS_I(inode);
1070	int		mode;
1071	int		error;
1072
1073	error = xfs_file_open(inode, file);
1074	if (error)
1075		return error;
1076
1077	/*
1078	 * If there are any blocks, read-ahead block 0 as we're almost
1079	 * certain to have the next operation be a read there.
1080	 */
1081	mode = xfs_ilock_data_map_shared(ip);
1082	if (ip->i_d.di_nextents > 0)
1083		xfs_dir3_data_readahead(ip, 0, -1);
1084	xfs_iunlock(ip, mode);
1085	return 0;
1086}
1087
1088STATIC int
1089xfs_file_release(
1090	struct inode	*inode,
1091	struct file	*filp)
1092{
1093	return xfs_release(XFS_I(inode));
1094}
1095
1096STATIC int
1097xfs_file_readdir(
1098	struct file	*file,
1099	struct dir_context *ctx)
1100{
1101	struct inode	*inode = file_inode(file);
1102	xfs_inode_t	*ip = XFS_I(inode);
1103	size_t		bufsize;
1104
1105	/*
1106	 * The Linux API doesn't pass the total size of the buffer we read
1107	 * into down to the filesystem.  With the filldir concept it's not
1108	 * needed for correctness, but the XFS dir2 leaf code wants an
1109	 * estimate of the buffer size to calculate its
1110	 * readahead window and size the buffers used for mapping to
1111	 * physical blocks.
1112	 *
1113	 * Try to give it an estimate that's good enough, maybe at some
1114	 * point we can change the ->readdir prototype to include the
1115	 * buffer size.  For now we use the current glibc buffer size.
1116	 */
1117	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
1118
1119	return xfs_readdir(ip, ctx, bufsize);
1120}
1121
1122/*
1123 * This type is designed to indicate the type of offset we would like
1124 * to search from page cache for xfs_seek_hole_data().
1125 */
1126enum {
1127	HOLE_OFF = 0,
1128	DATA_OFF,
1129};
1130
1131/*
1132 * Lookup the desired type of offset from the given page.
1133 *
1134 * On success, return true and the offset argument will point to the
1135 * start of the region that was found.  Otherwise this function will
1136 * return false and keep the offset argument unchanged.
1137 */
1138STATIC bool
1139xfs_lookup_buffer_offset(
1140	struct page		*page,
1141	loff_t			*offset,
1142	unsigned int		type)
1143{
1144	loff_t			lastoff = page_offset(page);
1145	bool			found = false;
1146	struct buffer_head	*bh, *head;
1147
1148	bh = head = page_buffers(page);
1149	do {
1150		/*
1151		 * Unwritten extents that have data in the page
1152		 * cache covering them can be identified by the
1153		 * BH_Unwritten state flag.  Pages with multiple
1154		 * buffers might have a mix of holes, data and
1155		 * unwritten extents - any buffer with valid
1156		 * data in it should have BH_Uptodate flag set
1157		 * on it.
1158		 */
1159		if (buffer_unwritten(bh) ||
1160		    buffer_uptodate(bh)) {
1161			if (type == DATA_OFF)
1162				found = true;
1163		} else {
1164			if (type == HOLE_OFF)
1165				found = true;
1166		}
1167
1168		if (found) {
1169			*offset = lastoff;
1170			break;
1171		}
1172		lastoff += bh->b_size;
1173	} while ((bh = bh->b_this_page) != head);
1174
1175	return found;
1176}
1177
1178/*
1179 * This routine is called to find out and return a data or hole offset
1180 * from the page cache for unwritten extents according to the desired
1181 * type for xfs_seek_hole_data().
1182 *
1183 * The argument offset is used to tell where we start to search from the
1184 * page cache.  The map is used to figure out the end points of the range
1185 * in which to look up pages.
1186 *
1187 * Return true if the desired type of offset was found, and the argument
1188 * offset is filled with that address.  Otherwise, return false and keep
1189 * offset unchanged.
1190 */
1191STATIC bool
1192xfs_find_get_desired_pgoff(
1193	struct inode		*inode,
1194	struct xfs_bmbt_irec	*map,
1195	unsigned int		type,
1196	loff_t			*offset)
1197{
1198	struct xfs_inode	*ip = XFS_I(inode);
1199	struct xfs_mount	*mp = ip->i_mount;
1200	struct pagevec		pvec;
1201	pgoff_t			index;
1202	pgoff_t			end;
1203	loff_t			endoff;
1204	loff_t			startoff = *offset;
1205	loff_t			lastoff = startoff;
1206	bool			found = false;
1207
1208	pagevec_init(&pvec, 0);
1209
1210	index = startoff >> PAGE_SHIFT;
1211	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1212	end = endoff >> PAGE_SHIFT;
1213	do {
1214		int		want;
1215		unsigned	nr_pages;
1216		unsigned int	i;
1217
1218		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
1219		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1220					  want);
1221		/*
1222		 * No page was mapped into the given range.  If we are searching
1223		 * for holes and this is the first time through the loop, it means
1224		 * the given offset landed in a hole, so return it.
1225		 *
1226		 * If we have already stepped through some block buffers looking
1227		 * for holes but they all contained data, the last offset has
1228		 * already been updated to point at the end of the last mapped
1229		 * page; if it has not reached the end of the search range, there
1230		 * must be a hole between them.
1231		 */
1232		if (nr_pages == 0) {
1233			/* Data search found nothing */
1234			if (type == DATA_OFF)
1235				break;
1236
1237			ASSERT(type == HOLE_OFF);
1238			if (lastoff == startoff || lastoff < endoff) {
1239				found = true;
1240				*offset = lastoff;
1241			}
1242			break;
1243		}
1244
1245		/*
1246		 * At least one page was found.  If this is the first time
1247		 * through the loop and the first page's offset is greater than
1248		 * the given search offset, a hole was found.
1249		 */
1250		if (type == HOLE_OFF && lastoff == startoff &&
1251		    lastoff < page_offset(pvec.pages[0])) {
1252			found = true;
1253			break;
1254		}
1255
1256		for (i = 0; i < nr_pages; i++) {
1257			struct page	*page = pvec.pages[i];
1258			loff_t		b_offset;
1259
1260			/*
1261			 * At this point, the page may be truncated or
1262			 * invalidated (changing page->mapping to NULL),
1263			 * or even swizzled back from swapper_space to tmpfs
1264			 * file mapping. However, page->index will not change
1265			 * because we have a reference on the page.
1266			 *
1267			 * Searching is done if the page index is out of range.
1268			 * If the current offset has not reached the end of
1269			 * the specified search range, there should be a hole
1270			 * between them.
1271			 */
1272			if (page->index > end) {
1273				if (type == HOLE_OFF && lastoff < endoff) {
1274					*offset = lastoff;
1275					found = true;
1276				}
1277				goto out;
1278			}
1279
1280			lock_page(page);
1281			/*
1282			 * Page truncated or invalidated (page->mapping == NULL).
1283			 * We can freely skip it and proceed to check the next
1284			 * page.
1285			 */
1286			if (unlikely(page->mapping != inode->i_mapping)) {
1287				unlock_page(page);
1288				continue;
1289			}
1290
1291			if (!page_has_buffers(page)) {
1292				unlock_page(page);
1293				continue;
1294			}
1295
1296			found = xfs_lookup_buffer_offset(page, &b_offset, type);
1297			if (found) {
1298				/*
1299				 * The found offset may be less than the start
1300				 * of the search range if this is the first time
1301				 * through here.
1302				 */
1303				*offset = max_t(loff_t, startoff, b_offset);
1304				unlock_page(page);
1305				goto out;
1306			}
1307
1308			/*
1309			 * Either we were searching for data and found nothing, or
1310			 * we were searching for a hole and found a data buffer.  In
1311			 * either case the next page probably contains what we want,
1312			 * so update the last offset accordingly.
1313			 */
1314			lastoff = page_offset(page) + PAGE_SIZE;
1315			unlock_page(page);
1316		}
1317
1318		/*
1319		 * Fewer pages were returned than we asked for, so the search is
1320		 * done.  In this case nothing was found when searching for data,
1321		 * but there is a hole beyond the last offset.
1322		 */
1323		if (nr_pages < want) {
1324			if (type == HOLE_OFF) {
1325				*offset = lastoff;
1326				found = true;
1327			}
1328			break;
1329		}
1330
1331		index = pvec.pages[i - 1]->index + 1;
1332		pagevec_release(&pvec);
1333	} while (index <= end);
1334
1335out:
1336	pagevec_release(&pvec);
1337	return found;
1338}
1339
1340/*
1341 * The caller must lock the inode with xfs_ilock_data_map_shared();
1342 * can we craft an appropriate ASSERT?
1343 *
1344 * @end exists because the VFS-level lseek interface is defined such that any
1345 * offset past i_size shall return -ENXIO, but we use this for the quota code,
1346 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
1347 */
1348loff_t
1349__xfs_seek_hole_data(
1350	struct inode		*inode,
1351	loff_t			start,
1352	loff_t			end,
1353	int			whence)
1354{
1355	struct xfs_inode	*ip = XFS_I(inode);
1356	struct xfs_mount	*mp = ip->i_mount;
1357	loff_t			uninitialized_var(offset);
1358	xfs_fileoff_t		fsbno;
1359	xfs_filblks_t		lastbno;
1360	int			error;
1361
1362	if (start >= end) {
1363		error = -ENXIO;
1364		goto out_error;
1365	}
1366
1367	/*
1368	 * Try to read extents from the first block indicated
1369	 * by fsbno to the end block of the file.
1370	 */
1371	fsbno = XFS_B_TO_FSBT(mp, start);
1372	lastbno = XFS_B_TO_FSB(mp, end);
1373
1374	for (;;) {
1375		struct xfs_bmbt_irec	map[2];
1376		int			nmap = 2;
1377		unsigned int		i;
1378
1379		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
1380				       XFS_BMAPI_ENTIRE);
1381		if (error)
1382			goto out_error;
1383
1384		/* No extents at given offset, must be beyond EOF */
1385		if (nmap == 0) {
1386			error = -ENXIO;
1387			goto out_error;
1388		}
1389
1390		for (i = 0; i < nmap; i++) {
1391			offset = max_t(loff_t, start,
1392				       XFS_FSB_TO_B(mp, map[i].br_startoff));
1393
1394			/* Landed in the hole we wanted? */
1395			if (whence == SEEK_HOLE &&
1396			    map[i].br_startblock == HOLESTARTBLOCK)
1397				goto out;
1398
1399			/* Landed in the data extent we wanted? */
1400			if (whence == SEEK_DATA &&
1401			    (map[i].br_startblock == DELAYSTARTBLOCK ||
1402			     (map[i].br_state == XFS_EXT_NORM &&
1403			      !isnullstartblock(map[i].br_startblock))))
1404				goto out;
1405
1406			/*
1407			 * Landed in an unwritten extent, try to search
1408			 * for hole or data from page cache.
1409			 */
1410			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
1411				if (xfs_find_get_desired_pgoff(inode, &map[i],
1412				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
1413							&offset))
1414					goto out;
1415			}
1416		}
1417
1418		/*
1419		 * We only received one extent out of the two requested. This
1420		 * means we've hit EOF and didn't find what we are looking for.
1421		 */
1422		if (nmap == 1) {
1423			/*
1424			 * If we were looking for a hole, set offset to
1425			 * the end of the file (i.e., there is an implicit
1426			 * hole at the end of any file).
1427			 */
1428			if (whence == SEEK_HOLE) {
1429				offset = end;
1430				break;
1431			}
1432			/*
1433			 * If we were looking for data, it's nowhere to be found
1434			 */
1435			ASSERT(whence == SEEK_DATA);
1436			error = -ENXIO;
1437			goto out_error;
1438		}
1439
1440		ASSERT(i > 1);
1441
1442		/*
1443		 * Nothing was found, proceed to the next round of search
1444		 * if the next reading offset is not at or beyond EOF.
1445		 */
1446		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
1447		start = XFS_FSB_TO_B(mp, fsbno);
1448		if (start >= end) {
1449			if (whence == SEEK_HOLE) {
1450				offset = end;
1451				break;
1452			}
1453			ASSERT(whence == SEEK_DATA);
1454			error = -ENXIO;
1455			goto out_error;
1456		}
1457	}
1458
1459out:
1460	/*
1461	 * If at this point we have found the hole we wanted, the returned
1462	 * offset may be bigger than the file size as it may be aligned to
1463 * a page boundary for unwritten extents.  We need to deal with this
1464	 * situation in particular.
1465	 */
1466	if (whence == SEEK_HOLE)
1467		offset = min_t(loff_t, offset, end);
1468
1469	return offset;
1470
1471out_error:
1472	return error;
1473}
1474
1475STATIC loff_t
1476xfs_seek_hole_data(
1477	struct file		*file,
1478	loff_t			start,
1479	int			whence)
1480{
1481	struct inode		*inode = file->f_mapping->host;
1482	struct xfs_inode	*ip = XFS_I(inode);
1483	struct xfs_mount	*mp = ip->i_mount;
1484	uint			lock;
1485	loff_t			offset, end;
1486	int			error = 0;
1487
1488	if (XFS_FORCED_SHUTDOWN(mp))
1489		return -EIO;
1490
1491	lock = xfs_ilock_data_map_shared(ip);
1492
1493	end = i_size_read(inode);
1494	offset = __xfs_seek_hole_data(inode, start, end, whence);
1495	if (offset < 0) {
1496		error = offset;
1497		goto out_unlock;
1498	}
1499
1500	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1501
1502out_unlock:
1503	xfs_iunlock(ip, lock);
1504
1505	if (error)
1506		return error;
1507	return offset;
1508}
1509
1510STATIC loff_t
1511xfs_file_llseek(
1512	struct file	*file,
1513	loff_t		offset,
1514	int		whence)
1515{
1516	switch (whence) {
1517	case SEEK_END:
1518	case SEEK_CUR:
1519	case SEEK_SET:
1520		return generic_file_llseek(file, offset, whence);
1521	case SEEK_HOLE:
 
 
1522	case SEEK_DATA:
1523		return xfs_seek_hole_data(file, offset, whence);
1524	default:
1525		return -EINVAL;
1526	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1527}
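/*
 * Editorial illustration (userspace, not part of this file): how the
 * SEEK_HOLE / SEEK_DATA cases above are typically consumed -- walking the
 * data extents of a sparse file.  Per the lseek() contract there is always
 * an implicit hole at EOF, which matches the "offset = end" handling above.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void dump_data_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;	/* continue searching after this extent */
	}
	/* lseek() returns -1/ENXIO once data is at or beyond EOF. */
}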
 
1528
1529/*
1530 * Locking for serialisation of IO during page faults. This results in a lock
1531 * ordering of:
1532 *
1533 * mmap_sem (MM)
1534 *   sb_start_pagefault(vfs, freeze)
1535 *     i_mmaplock (XFS - truncate serialisation)
1536 *       page_lock (MM)
1537 *         i_lock (XFS - extent map serialisation)
1538 */
 
 
 
 
 
 
 
 
 
 
1539
1540/*
1541 * An mmap()ed file has taken a write protection fault and is being made writable. We
1542 * can set the page state up correctly for a writable page, which means we can
1543 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
1544 * mapping.
1545 */
1546STATIC int
1547xfs_filemap_page_mkwrite(
1548	struct vm_area_struct	*vma,
1549	struct vm_fault		*vmf)
1550{
1551	struct inode		*inode = file_inode(vma->vm_file);
1552	int			ret;
1553
1554	trace_xfs_filemap_page_mkwrite(XFS_I(inode));
 
 
 
1555
1556	sb_start_pagefault(inode->i_sb);
1557	file_update_time(vma->vm_file);
1558	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1559
1560	if (IS_DAX(inode)) {
1561		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
 
 
 
 
 
 
1562	} else {
1563		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
1564		ret = block_page_mkwrite_return(ret);
1565	}
1566
1567	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1568	sb_end_pagefault(inode->i_sb);
1569
 
 
1570	return ret;
1571}
1572
1573STATIC int
 
 
 
 
 
 
 
 
1574xfs_filemap_fault(
1575	struct vm_area_struct	*vma,
1576	struct vm_fault		*vmf)
1577{
1578	struct inode		*inode = file_inode(vma->vm_file);
1579	int			ret;
1580
1581	trace_xfs_filemap_fault(XFS_I(inode));
1582
1583	/* DAX can shortcut the normal fault path on write faults! */
1584	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1585		return xfs_filemap_page_mkwrite(vma, vmf);
1586
1587	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1588	if (IS_DAX(inode)) {
1589		/*
1590		 * we do not want to trigger unwritten extent conversion on read
1591		 * faults - that is unnecessary overhead and would also require
1592		 * changes to xfs_get_blocks_direct() to map unwritten extent
1593		 * ioend for conversion on read-only mappings.
1594		 */
1595		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
1596	} else
1597		ret = filemap_fault(vma, vmf);
1598	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1599
1600	return ret;
1601}
1602
1603/*
1604 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
1605 * both read and write faults. Hence we need to handle both cases. There is no
1606 * ->pmd_mkwrite callout for huge pages, so we have a single function here to
1607 * handle both cases. @flags carries the information on the type of fault
1608 * occurring.
1609 */
1610STATIC int
1611xfs_filemap_pmd_fault(
1612	struct vm_area_struct	*vma,
1613	unsigned long		addr,
1614	pmd_t			*pmd,
1615	unsigned int		flags)
1616{
1617	struct inode		*inode = file_inode(vma->vm_file);
1618	struct xfs_inode	*ip = XFS_I(inode);
1619	int			ret;
1620
1621	if (!IS_DAX(inode))
1622		return VM_FAULT_FALLBACK;
1623
1624	trace_xfs_filemap_pmd_fault(ip);
 
 
 
1625
1626	if (flags & FAULT_FLAG_WRITE) {
1627		sb_start_pagefault(inode->i_sb);
1628		file_update_time(vma->vm_file);
1629	}
1630
1631	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1632	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
1633			      NULL);
1634	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1635
1636	if (flags & FAULT_FLAG_WRITE)
1637		sb_end_pagefault(inode->i_sb);
1638
1639	return ret;
1640}
1641
1642/*
1643 * pfn_mkwrite was originally intended to ensure we capture timestamp
1644 * updates on write faults. In reality, it's needed to serialise against
1645 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
1646 * to ensure the fault barrier is in place.
1647 */
1648static int
1649xfs_filemap_pfn_mkwrite(
1650	struct vm_area_struct	*vma,
1651	struct vm_fault		*vmf)
1652{
1653
1654	struct inode		*inode = file_inode(vma->vm_file);
1655	struct xfs_inode	*ip = XFS_I(inode);
1656	int			ret = VM_FAULT_NOPAGE;
1657	loff_t			size;
1658
1659	trace_xfs_filemap_pfn_mkwrite(ip);
1660
1661	sb_start_pagefault(inode->i_sb);
1662	file_update_time(vma->vm_file);
1663
1664	/* check if the faulting page hasn't raced with truncate */
1665	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1666	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1667	if (vmf->pgoff >= size)
1668		ret = VM_FAULT_SIGBUS;
1669	else if (IS_DAX(inode))
1670		ret = dax_pfn_mkwrite(vma, vmf);
1671	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1672	sb_end_pagefault(inode->i_sb);
1673	return ret;
1674
1675}
1676
1677static const struct vm_operations_struct xfs_file_vm_ops = {
1678	.fault		= xfs_filemap_fault,
1679	.pmd_fault	= xfs_filemap_pmd_fault,
1680	.map_pages	= filemap_map_pages,
1681	.page_mkwrite	= xfs_filemap_page_mkwrite,
1682	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1683};
1684
1685STATIC int
1686xfs_file_mmap(
1687	struct file	*filp,
1688	struct vm_area_struct *vma)
1689{
1690	file_accessed(filp);
 
 
 
 
 
 
 
 
 
 
1691	vma->vm_ops = &xfs_file_vm_ops;
1692	if (IS_DAX(file_inode(filp)))
1693		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1694	return 0;
1695}
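/*
 * Editorial illustration (userspace, not part of this file): a shared
 * writable mapping whose first store triggers the ->page_mkwrite path
 * installed above (delalloc reservation and ENOSPC checking happen at
 * fault time).  The 4096 mapping length is an arbitrary assumption and
 * fd is assumed to refer to a file at least one page long.
 */
#include <sys/mman.h>

static int dirty_first_page(int fd)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	p[0] = 1;		/* write fault -> xfs_filemap_page_mkwrite() */
	return munmap(p, 4096);
}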
1696
1697const struct file_operations xfs_file_operations = {
1698	.llseek		= xfs_file_llseek,
1699	.read_iter	= xfs_file_read_iter,
1700	.write_iter	= xfs_file_write_iter,
1701	.splice_read	= xfs_file_splice_read,
1702	.splice_write	= iter_file_splice_write,
 
1703	.unlocked_ioctl	= xfs_file_ioctl,
1704#ifdef CONFIG_COMPAT
1705	.compat_ioctl	= xfs_file_compat_ioctl,
1706#endif
1707	.mmap		= xfs_file_mmap,
 
1708	.open		= xfs_file_open,
1709	.release	= xfs_file_release,
1710	.fsync		= xfs_file_fsync,
 
1711	.fallocate	= xfs_file_fallocate,
 
 
1712};
1713
1714const struct file_operations xfs_dir_file_operations = {
1715	.open		= xfs_dir_open,
1716	.read		= generic_read_dir,
1717	.iterate	= xfs_file_readdir,
1718	.llseek		= generic_file_llseek,
1719	.unlocked_ioctl	= xfs_file_ioctl,
1720#ifdef CONFIG_COMPAT
1721	.compat_ioctl	= xfs_file_compat_ioctl,
1722#endif
1723	.fsync		= xfs_dir_fsync,
1724};