   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_trans.h"
  15#include "xfs_inode_item.h"
  16#include "xfs_bmap.h"
  17#include "xfs_bmap_util.h"
  18#include "xfs_dir2.h"
  19#include "xfs_dir2_priv.h"
  20#include "xfs_ioctl.h"
  21#include "xfs_trace.h"
  22#include "xfs_log.h"
  23#include "xfs_icache.h"
  24#include "xfs_pnfs.h"
  25#include "xfs_iomap.h"
  26#include "xfs_reflink.h"
  27
  28#include <linux/dax.h>
  29#include <linux/falloc.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mman.h>
  32#include <linux/fadvise.h>
  33#include <linux/mount.h>
  34
  35static const struct vm_operations_struct xfs_file_vm_ops;
  36
  37/*
  38 * Decide if the given file range is aligned to the size of the fundamental
  39 * allocation unit for the file.
  40 */
  41static bool
  42xfs_is_falloc_aligned(
  43	struct xfs_inode	*ip,
  44	loff_t			pos,
  45	long long int		len)
  46{
  47	struct xfs_mount	*mp = ip->i_mount;
  48	uint64_t		mask;
  49
  50	if (XFS_IS_REALTIME_INODE(ip)) {
  51		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
  52			u64	rextbytes;
  53			u32	mod;
  54
  55			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
  56			div_u64_rem(pos, rextbytes, &mod);
  57			if (mod)
  58				return false;
  59			div_u64_rem(len, rextbytes, &mod);
  60			return mod == 0;
  61		}
  62		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
  63	} else {
  64		mask = mp->m_sb.sb_blocksize - 1;
  65	}
  66
  67	return !((pos | len) & mask);
  68}
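
/*
 * A minimal userspace sketch (not from the kernel source): the same
 * alignment test as above, for a hypothetical allocation unit given in
 * bytes.  Power-of-two units allow a cheap mask test; anything else
 * falls back to remainder checks, mirroring the is_power_of_2() /
 * div_u64_rem() split in xfs_is_falloc_aligned().
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_falloc_aligned(uint64_t pos, uint64_t len, uint64_t unit)
{
	if (unit & (unit - 1))	/* not a power of two */
		return pos % unit == 0 && len % unit == 0;
	return !((pos | len) & (unit - 1));	/* mask test */
}
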
  69
  70/*
  71 * Fsync operations on directories are much simpler than on regular files,
  72 * as there is no file data to flush, and thus also no need for explicit
  73 * cache flush operations, and there are no non-transaction metadata updates
  74 * on directories either.
  75 */
  76STATIC int
  77xfs_dir_fsync(
  78	struct file		*file,
  79	loff_t			start,
  80	loff_t			end,
  81	int			datasync)
  82{
  83	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
  84
  85	trace_xfs_dir_fsync(ip);
  86	return xfs_log_force_inode(ip);
  87}
  88
  89static xfs_csn_t
  90xfs_fsync_seq(
  91	struct xfs_inode	*ip,
  92	bool			datasync)
  93{
  94	if (!xfs_ipincount(ip))
  95		return 0;
  96	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
  97		return 0;
  98	return ip->i_itemp->ili_commit_seq;
  99}
 100
 101/*
 102 * All metadata updates are logged, which means that we just have to flush the
 103 * log up to the latest LSN that touched the inode.
 104 *
 105 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 106 * the log force before we clear the ili_fsync_fields field. This ensures that
 107 * we don't get a racing sync operation that does not wait for the metadata to
 108 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 109 * then all that will happen is the log force will do nothing as the lsn will
 110 * already be on disk.  We can't race with setting ili_fsync_fields because that
 111 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 112 * shared until after the ili_fsync_fields is cleared.
 113 */
  114static int
 115xfs_fsync_flush_log(
 116	struct xfs_inode	*ip,
 117	bool			datasync,
 118	int			*log_flushed)
 119{
 120	int			error = 0;
 121	xfs_csn_t		seq;
 122
 123	xfs_ilock(ip, XFS_ILOCK_SHARED);
 124	seq = xfs_fsync_seq(ip, datasync);
 125	if (seq) {
 126		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
 127					  log_flushed);
 128
 129		spin_lock(&ip->i_itemp->ili_lock);
 130		ip->i_itemp->ili_fsync_fields = 0;
 131		spin_unlock(&ip->i_itemp->ili_lock);
 132	}
 133	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 134	return error;
 135}
 136
 137STATIC int
 138xfs_file_fsync(
 139	struct file		*file,
 140	loff_t			start,
 141	loff_t			end,
 142	int			datasync)
 143{
 144	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
 145	struct xfs_mount	*mp = ip->i_mount;
 146	int			error, err2;
 147	int			log_flushed = 0;
 148
 149	trace_xfs_file_fsync(ip);
 150
 151	error = file_write_and_wait_range(file, start, end);
 152	if (error)
 153		return error;
 154
 155	if (xfs_is_shutdown(mp))
 156		return -EIO;
 157
 158	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 159
 160	/*
 161	 * If we have an RT and/or log subvolume we need to make sure to flush
 162	 * the write cache the device used for file data first.  This is to
 163	 * ensure newly written file data make it to disk before logging the new
 164	 * inode size in case of an extending write.
 165	 */
 166	if (XFS_IS_REALTIME_INODE(ip))
 167		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
 168	else if (mp->m_logdev_targp != mp->m_ddev_targp)
 169		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 170
 171	/*
 172	 * Any inode that has dirty modifications in the log is pinned.  The
 173	 * racy check here for a pinned inode will not catch modifications
 174	 * that happen concurrently to the fsync call, but fsync semantics
  175	 * only require syncing previously completed I/O.
  176	 */
 177	if (xfs_ipincount(ip)) {
 178		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
 179		if (err2 && !error)
 180			error = err2;
 181	}
 182
 183	/*
  184	 * If we only have a single device, and the log force above was
 185	 * a no-op we might have to flush the data device cache here.
 186	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
 187	 * an already allocated file and thus do not have any metadata to
 188	 * commit.
 189	 */
 190	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
 191	    mp->m_logdev_targp == mp->m_ddev_targp) {
 192		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 193		if (err2 && !error)
 194			error = err2;
 195	}
 196
 197	return error;
 198}
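
/*
 * A minimal userspace sketch (not from the kernel source) of the case
 * the comment above describes: overwriting already-allocated blocks and
 * then calling fdatasync() leaves no metadata to commit, so the
 * data-device cache flush in xfs_file_fsync() may be the only work
 * left.  The path, buffer, and offset are hypothetical.
 */
#include <fcntl.h>
#include <unistd.h>

int overwrite_and_sync(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pwrite(fd, buf, len, 0);	/* overwrite in place, no extension */
	if (n == (ssize_t)len && fdatasync(fd) == 0) {
		close(fd);
		return 0;
	}
	close(fd);
	return -1;
}
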
 199
 200static int
 201xfs_ilock_iocb(
 202	struct kiocb		*iocb,
 203	unsigned int		lock_mode)
 204{
 205	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 206
 207	if (iocb->ki_flags & IOCB_NOWAIT) {
 208		if (!xfs_ilock_nowait(ip, lock_mode))
 209			return -EAGAIN;
 210	} else {
 211		xfs_ilock(ip, lock_mode);
 212	}
 213
 214	return 0;
 215}
 216
 217static int
 218xfs_ilock_iocb_for_write(
 219	struct kiocb		*iocb,
 220	unsigned int		*lock_mode)
 221{
 222	ssize_t			ret;
 223	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 224
 225	ret = xfs_ilock_iocb(iocb, *lock_mode);
 226	if (ret)
 227		return ret;
 228
 229	if (*lock_mode == XFS_IOLOCK_EXCL)
 230		return 0;
 231	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
 232		return 0;
 233
 234	xfs_iunlock(ip, *lock_mode);
 235	*lock_mode = XFS_IOLOCK_EXCL;
 236	return xfs_ilock_iocb(iocb, *lock_mode);
 237}
 238
 239static unsigned int
 240xfs_ilock_for_write_fault(
 241	struct xfs_inode	*ip)
 242{
 243	/* get a shared lock if no remapping in progress */
 244	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
 245	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
 246		return XFS_MMAPLOCK_SHARED;
 247
 248	/* wait for remapping to complete */
 249	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
 250	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 251	return XFS_MMAPLOCK_EXCL;
 252}
 253
 254STATIC ssize_t
 255xfs_file_dio_read(
 256	struct kiocb		*iocb,
 257	struct iov_iter		*to)
 258{
 259	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 260	ssize_t			ret;
 261
 262	trace_xfs_file_direct_read(iocb, to);
 263
 264	if (!iov_iter_count(to))
 265		return 0; /* skip atime */
 266
 267	file_accessed(iocb->ki_filp);
 268
 269	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 270	if (ret)
 271		return ret;
 272	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
 273	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 274
 275	return ret;
 276}
 277
 278static noinline ssize_t
 279xfs_file_dax_read(
 280	struct kiocb		*iocb,
 281	struct iov_iter		*to)
 282{
 283	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 284	ssize_t			ret = 0;
 285
 286	trace_xfs_file_dax_read(iocb, to);
 287
 288	if (!iov_iter_count(to))
 289		return 0; /* skip atime */
 290
 291	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 292	if (ret)
 293		return ret;
 294	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
 295	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 296
 297	file_accessed(iocb->ki_filp);
 298	return ret;
 299}
 300
 301STATIC ssize_t
 302xfs_file_buffered_read(
 303	struct kiocb		*iocb,
 304	struct iov_iter		*to)
 305{
 306	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 307	ssize_t			ret;
 308
 309	trace_xfs_file_buffered_read(iocb, to);
 310
 311	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 312	if (ret)
 313		return ret;
 314	ret = generic_file_read_iter(iocb, to);
 315	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 316
 317	return ret;
 318}
 319
 320STATIC ssize_t
 321xfs_file_read_iter(
 322	struct kiocb		*iocb,
 323	struct iov_iter		*to)
 324{
 325	struct inode		*inode = file_inode(iocb->ki_filp);
 326	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
 327	ssize_t			ret = 0;
 328
 329	XFS_STATS_INC(mp, xs_read_calls);
 330
 331	if (xfs_is_shutdown(mp))
 332		return -EIO;
 333
 334	if (IS_DAX(inode))
 335		ret = xfs_file_dax_read(iocb, to);
 336	else if (iocb->ki_flags & IOCB_DIRECT)
 337		ret = xfs_file_dio_read(iocb, to);
 338	else
 339		ret = xfs_file_buffered_read(iocb, to);
 340
 341	if (ret > 0)
 342		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 343	return ret;
 344}
 345
 346STATIC ssize_t
 347xfs_file_splice_read(
 348	struct file		*in,
 349	loff_t			*ppos,
 350	struct pipe_inode_info	*pipe,
 351	size_t			len,
 352	unsigned int		flags)
 353{
 354	struct inode		*inode = file_inode(in);
 355	struct xfs_inode	*ip = XFS_I(inode);
 356	struct xfs_mount	*mp = ip->i_mount;
 357	ssize_t			ret = 0;
 358
 359	XFS_STATS_INC(mp, xs_read_calls);
 360
 361	if (xfs_is_shutdown(mp))
 362		return -EIO;
 363
 364	trace_xfs_file_splice_read(ip, *ppos, len);
 365
 366	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 367	ret = filemap_splice_read(in, ppos, pipe, len, flags);
 368	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 369	if (ret > 0)
 370		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 371	return ret;
 372}
 373
 374/*
 375 * Common pre-write limit and setup checks.
 376 *
  377 * Called with the iolock held either shared or exclusive according to
 378 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 379 * if called for a direct write beyond i_size.
 380 */
 381STATIC ssize_t
 382xfs_file_write_checks(
 383	struct kiocb		*iocb,
 384	struct iov_iter		*from,
 385	unsigned int		*iolock)
 386{
 387	struct file		*file = iocb->ki_filp;
 388	struct inode		*inode = file->f_mapping->host;
 389	struct xfs_inode	*ip = XFS_I(inode);
 390	ssize_t			error = 0;
 391	size_t			count = iov_iter_count(from);
 392	bool			drained_dio = false;
 393	loff_t			isize;
 394
 395restart:
 396	error = generic_write_checks(iocb, from);
 397	if (error <= 0)
 398		return error;
 399
 400	if (iocb->ki_flags & IOCB_NOWAIT) {
 401		error = break_layout(inode, false);
 402		if (error == -EWOULDBLOCK)
 403			error = -EAGAIN;
 404	} else {
 405		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
 406	}
 407
 408	if (error)
 409		return error;
 410
 411	/*
 412	 * For changing security info in file_remove_privs() we need i_rwsem
 413	 * exclusively.
 414	 */
 415	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 416		xfs_iunlock(ip, *iolock);
 417		*iolock = XFS_IOLOCK_EXCL;
 418		error = xfs_ilock_iocb(iocb, *iolock);
 419		if (error) {
 420			*iolock = 0;
 421			return error;
 422		}
 423		goto restart;
 424	}
 425
 426	/*
 427	 * If the offset is beyond the size of the file, we need to zero any
 428	 * blocks that fall between the existing EOF and the start of this
 429	 * write.  If zeroing is needed and we are currently holding the iolock
  430	 * shared, we need to update it to exclusive, which implies having to
  431	 * redo all of the preceding checks.
 432	 *
 433	 * We need to serialise against EOF updates that occur in IO completions
 434	 * here. We want to make sure that nobody is changing the size while we
 435	 * do this check until we have placed an IO barrier (i.e.  hold the
 436	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
 437	 * spinlock effectively forms a memory barrier once we have the
 438	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
 439	 * hence be able to correctly determine if we need to run zeroing.
 440	 *
 441	 * We can do an unlocked check here safely as IO completion can only
 442	 * extend EOF. Truncate is locked out at this point, so the EOF can
 443	 * not move backwards, only forwards. Hence we only need to take the
 444	 * slow path and spin locks when we are at or beyond the current EOF.
 445	 */
 446	if (iocb->ki_pos <= i_size_read(inode))
 447		goto out;
 448
 449	spin_lock(&ip->i_flags_lock);
 450	isize = i_size_read(inode);
 451	if (iocb->ki_pos > isize) {
 452		spin_unlock(&ip->i_flags_lock);
 453
 454		if (iocb->ki_flags & IOCB_NOWAIT)
 455			return -EAGAIN;
 456
 457		if (!drained_dio) {
 458			if (*iolock == XFS_IOLOCK_SHARED) {
 459				xfs_iunlock(ip, *iolock);
 460				*iolock = XFS_IOLOCK_EXCL;
 461				xfs_ilock(ip, *iolock);
 462				iov_iter_reexpand(from, count);
 463			}
 464			/*
 465			 * We now have an IO submission barrier in place, but
 466			 * AIO can do EOF updates during IO completion and hence
 467			 * we now need to wait for all of them to drain. Non-AIO
 468			 * DIO will have drained before we are given the
 469			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
 470			 * no-op.
 471			 */
 472			inode_dio_wait(inode);
 473			drained_dio = true;
 474			goto restart;
 475		}
 476
 477		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
 478		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
 479		if (error)
 480			return error;
 481	} else
 482		spin_unlock(&ip->i_flags_lock);
 483
 484out:
 485	return kiocb_modified(iocb);
 486}
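
/*
 * A sketch (not from the kernel source) of the check/lock/recheck
 * pattern the EOF-zeroing comment above relies on: the unlocked fast
 * path is safe because the guarded value only moves in one direction,
 * and the locked recheck catches a racing update.  All names here are
 * hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t size_lock;
static long long file_size;	/* like EOF above, this only grows */

static bool write_needs_zeroing(long long pos)
{
	bool beyond;

	if (pos <= file_size)	/* unlocked fast path */
		return false;
	pthread_spin_lock(&size_lock);
	beyond = pos > file_size;	/* locked recheck */
	pthread_spin_unlock(&size_lock);
	return beyond;
}
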
 487
 488static int
 489xfs_dio_write_end_io(
 490	struct kiocb		*iocb,
 491	ssize_t			size,
 492	int			error,
 493	unsigned		flags)
 494{
 495	struct inode		*inode = file_inode(iocb->ki_filp);
 496	struct xfs_inode	*ip = XFS_I(inode);
 497	loff_t			offset = iocb->ki_pos;
 498	unsigned int		nofs_flag;
 499
 500	trace_xfs_end_io_direct_write(ip, offset, size);
 501
 502	if (xfs_is_shutdown(ip->i_mount))
 503		return -EIO;
 504
 505	if (error)
 506		return error;
 507	if (!size)
 508		return 0;
 509
 510	/*
 511	 * Capture amount written on completion as we can't reliably account
 512	 * for it on submission.
 513	 */
 514	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
 515
 516	/*
 517	 * We can allocate memory here while doing writeback on behalf of
 518	 * memory reclaim.  To avoid memory allocation deadlocks set the
 519	 * task-wide nofs context for the following operations.
 520	 */
 521	nofs_flag = memalloc_nofs_save();
 522
 523	if (flags & IOMAP_DIO_COW) {
 524		error = xfs_reflink_end_cow(ip, offset, size);
 525		if (error)
 526			goto out;
 527	}
 528
 529	/*
 530	 * Unwritten conversion updates the in-core isize after extent
 531	 * conversion but before updating the on-disk size. Updating isize any
 532	 * earlier allows a racing dio read to find unwritten extents before
 533	 * they are converted.
 534	 */
 535	if (flags & IOMAP_DIO_UNWRITTEN) {
 536		error = xfs_iomap_write_unwritten(ip, offset, size, true);
 537		goto out;
 538	}
 539
 540	/*
 541	 * We need to update the in-core inode size here so that we don't end up
 542	 * with the on-disk inode size being outside the in-core inode size. We
 543	 * have no other method of updating EOF for AIO, so always do it here
 544	 * if necessary.
 545	 *
 546	 * We need to lock the test/set EOF update as we can be racing with
 547	 * other IO completions here to update the EOF. Failing to serialise
 548	 * here can result in EOF moving backwards and Bad Things Happen when
 549	 * that occurs.
 550	 *
 551	 * As IO completion only ever extends EOF, we can do an unlocked check
 552	 * here to avoid taking the spinlock. If we land within the current EOF,
 553	 * then we do not need to do an extending update at all, and we don't
 554	 * need to take the lock to check this. If we race with an update moving
 555	 * EOF, then we'll either still be beyond EOF and need to take the lock,
 556	 * or we'll be within EOF and we don't need to take it at all.
 557	 */
 558	if (offset + size <= i_size_read(inode))
 559		goto out;
 560
 561	spin_lock(&ip->i_flags_lock);
 562	if (offset + size > i_size_read(inode)) {
 563		i_size_write(inode, offset + size);
 564		spin_unlock(&ip->i_flags_lock);
 565		error = xfs_setfilesize(ip, offset, size);
 566	} else {
 567		spin_unlock(&ip->i_flags_lock);
 568	}
 569
 570out:
 571	memalloc_nofs_restore(nofs_flag);
 572	return error;
 573}
 574
 575static const struct iomap_dio_ops xfs_dio_write_ops = {
 576	.end_io		= xfs_dio_write_end_io,
 577};
 578
 579/*
 580 * Handle block aligned direct I/O writes
 581 */
 582static noinline ssize_t
 583xfs_file_dio_write_aligned(
 584	struct xfs_inode	*ip,
 585	struct kiocb		*iocb,
 586	struct iov_iter		*from)
 587{
 588	unsigned int		iolock = XFS_IOLOCK_SHARED;
 589	ssize_t			ret;
 590
 591	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 592	if (ret)
 593		return ret;
 594	ret = xfs_file_write_checks(iocb, from, &iolock);
 595	if (ret)
 596		goto out_unlock;
 597
 598	/*
 599	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
 600	 * the iolock back to shared if we had to take the exclusive lock in
 601	 * xfs_file_write_checks() for other reasons.
 602	 */
 603	if (iolock == XFS_IOLOCK_EXCL) {
 604		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 605		iolock = XFS_IOLOCK_SHARED;
 606	}
 607	trace_xfs_file_direct_write(iocb, from);
 608	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 609			   &xfs_dio_write_ops, 0, NULL, 0);
 610out_unlock:
 611	if (iolock)
 612		xfs_iunlock(ip, iolock);
 613	return ret;
 614}
 615
 616/*
 617 * Handle block unaligned direct I/O writes
 618 *
 619 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 620 * them to be done in parallel with reads and other direct I/O writes.  However,
 621 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 622 * to do sub-block zeroing and that requires serialisation against other direct
 623 * I/O to the same block.  In this case we need to serialise the submission of
 624 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 625 * In the case where sub-block zeroing is not required, we can do concurrent
 626 * sub-block dios to the same block successfully.
 627 *
 628 * Optimistically submit the I/O using the shared lock first, but use the
 629 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 630 * if block allocation or partial block zeroing would be required.  In that case
 631 * we try again with the exclusive lock.
 632 */
 633static noinline ssize_t
 634xfs_file_dio_write_unaligned(
 635	struct xfs_inode	*ip,
 636	struct kiocb		*iocb,
 637	struct iov_iter		*from)
 638{
 639	size_t			isize = i_size_read(VFS_I(ip));
 640	size_t			count = iov_iter_count(from);
 641	unsigned int		iolock = XFS_IOLOCK_SHARED;
 642	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
 643	ssize_t			ret;
 644
 645	/*
 646	 * Extending writes need exclusivity because of the sub-block zeroing
 647	 * that the DIO code always does for partial tail blocks beyond EOF, so
 648	 * don't even bother trying the fast path in this case.
 649	 */
 650	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
 651		if (iocb->ki_flags & IOCB_NOWAIT)
 652			return -EAGAIN;
 653retry_exclusive:
 654		iolock = XFS_IOLOCK_EXCL;
 655		flags = IOMAP_DIO_FORCE_WAIT;
 656	}
 657
 658	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
 659	if (ret)
 660		return ret;
 661
 662	/*
 663	 * We can't properly handle unaligned direct I/O to reflink files yet,
 664	 * as we can't unshare a partial block.
 665	 */
 666	if (xfs_is_cow_inode(ip)) {
 667		trace_xfs_reflink_bounce_dio_write(iocb, from);
 668		ret = -ENOTBLK;
 669		goto out_unlock;
 670	}
 671
 672	ret = xfs_file_write_checks(iocb, from, &iolock);
 673	if (ret)
 674		goto out_unlock;
 675
 676	/*
 677	 * If we are doing exclusive unaligned I/O, this must be the only I/O
 678	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
 679	 * conversions from the AIO end_io handler.  Wait for all other I/O to
 680	 * drain first.
 681	 */
 682	if (flags & IOMAP_DIO_FORCE_WAIT)
 683		inode_dio_wait(VFS_I(ip));
 684
 685	trace_xfs_file_direct_write(iocb, from);
 686	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 687			   &xfs_dio_write_ops, flags, NULL, 0);
 688
 689	/*
 690	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
 691	 * layer rejected it for mapping or locking reasons. If we are doing
 692	 * nonblocking user I/O, propagate the error.
 693	 */
 694	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
 695		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
 696		xfs_iunlock(ip, iolock);
 697		goto retry_exclusive;
 698	}
 699
 700out_unlock:
 701	if (iolock)
 702		xfs_iunlock(ip, iolock);
 703	return ret;
 704}
 705
 706static ssize_t
 707xfs_file_dio_write(
 708	struct kiocb		*iocb,
 709	struct iov_iter		*from)
 710{
 711	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 712	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
 713	size_t			count = iov_iter_count(from);
 714
 715	/* direct I/O must be aligned to device logical sector size */
 716	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 717		return -EINVAL;
 718	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
 719		return xfs_file_dio_write_unaligned(ip, iocb, from);
 720	return xfs_file_dio_write_aligned(ip, iocb, from);
 721}
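
/*
 * A userspace sketch (not from the kernel source) of satisfying the
 * alignment rule enforced above: with O_DIRECT, the file offset, the
 * byte count, and the buffer address must all be suitably aligned, or
 * the write fails with EINVAL.  The 4096-byte sector size is an
 * assumption; query the device for real use.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int dio_write_4k(const char *path)
{
	void *buf;
	ssize_t n;
	int fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096)) {	/* aligned buffer address */
		close(fd);
		return -1;
	}
	memset(buf, 0, 4096);
	n = pwrite(fd, buf, 4096, 0);	/* aligned offset and count */
	free(buf);
	close(fd);
	return n == 4096 ? 0 : -1;
}
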
 722
 723static noinline ssize_t
 724xfs_file_dax_write(
 725	struct kiocb		*iocb,
 726	struct iov_iter		*from)
 727{
 728	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 729	struct xfs_inode	*ip = XFS_I(inode);
 730	unsigned int		iolock = XFS_IOLOCK_EXCL;
 731	ssize_t			ret, error = 0;
 732	loff_t			pos;
 733
 734	ret = xfs_ilock_iocb(iocb, iolock);
 735	if (ret)
 736		return ret;
 737	ret = xfs_file_write_checks(iocb, from, &iolock);
 738	if (ret)
 739		goto out;
 740
 741	pos = iocb->ki_pos;
 742
 743	trace_xfs_file_dax_write(iocb, from);
 744	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
 745	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 746		i_size_write(inode, iocb->ki_pos);
 747		error = xfs_setfilesize(ip, pos, ret);
 748	}
 749out:
 750	if (iolock)
 751		xfs_iunlock(ip, iolock);
 752	if (error)
 753		return error;
 754
 755	if (ret > 0) {
 756		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 757
 758		/* Handle various SYNC-type writes */
 759		ret = generic_write_sync(iocb, ret);
 760	}
 761	return ret;
 762}
 763
 764STATIC ssize_t
 765xfs_file_buffered_write(
 766	struct kiocb		*iocb,
 767	struct iov_iter		*from)
 768{
 769	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 770	struct xfs_inode	*ip = XFS_I(inode);
 771	ssize_t			ret;
 772	bool			cleared_space = false;
 773	unsigned int		iolock;
 774
 775write_retry:
 776	iolock = XFS_IOLOCK_EXCL;
 777	ret = xfs_ilock_iocb(iocb, iolock);
 778	if (ret)
 779		return ret;
 780
 781	ret = xfs_file_write_checks(iocb, from, &iolock);
 782	if (ret)
 783		goto out;
 784
 785	trace_xfs_file_buffered_write(iocb, from);
 786	ret = iomap_file_buffered_write(iocb, from,
 787			&xfs_buffered_write_iomap_ops);
 788
 789	/*
 790	 * If we hit a space limit, try to free up some lingering preallocated
 791	 * space before returning an error. In the case of ENOSPC, first try to
 792	 * write back all dirty inodes to free up some of the excess reserved
 793	 * metadata space. This reduces the chances that the eofblocks scan
 794	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 795	 * also behaves as a filter to prevent too many eofblocks scans from
 796	 * running at the same time.  Use a synchronous scan to increase the
 797	 * effectiveness of the scan.
 798	 */
 799	if (ret == -EDQUOT && !cleared_space) {
 800		xfs_iunlock(ip, iolock);
 801		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 802		cleared_space = true;
 803		goto write_retry;
 804	} else if (ret == -ENOSPC && !cleared_space) {
 805		struct xfs_icwalk	icw = {0};
 806
 807		cleared_space = true;
 808		xfs_flush_inodes(ip->i_mount);
 809
 810		xfs_iunlock(ip, iolock);
 811		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
 812		xfs_blockgc_free_space(ip->i_mount, &icw);
 813		goto write_retry;
 814	}
 815
 816out:
 817	if (iolock)
 818		xfs_iunlock(ip, iolock);
 819
 820	if (ret > 0) {
 821		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 822		/* Handle various SYNC-type writes */
 823		ret = generic_write_sync(iocb, ret);
 824	}
 825	return ret;
 826}
 827
 828STATIC ssize_t
 829xfs_file_write_iter(
 830	struct kiocb		*iocb,
 831	struct iov_iter		*from)
 832{
 833	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 834	struct xfs_inode	*ip = XFS_I(inode);
 835	ssize_t			ret;
 836	size_t			ocount = iov_iter_count(from);
 837
 838	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 839
 840	if (ocount == 0)
 841		return 0;
 842
 843	if (xfs_is_shutdown(ip->i_mount))
 844		return -EIO;
 845
 846	if (IS_DAX(inode))
 847		return xfs_file_dax_write(iocb, from);
 848
 849	if (iocb->ki_flags & IOCB_DIRECT) {
 850		/*
 851		 * Allow a directio write to fall back to a buffered
 852		 * write *only* in the case that we're doing a reflink
 853		 * CoW.  In all other directio scenarios we do not
 854		 * allow an operation to fall back to buffered mode.
 855		 */
 856		ret = xfs_file_dio_write(iocb, from);
 857		if (ret != -ENOTBLK)
 858			return ret;
 859	}
 860
 861	return xfs_file_buffered_write(iocb, from);
 862}
 863
 864static void
 865xfs_wait_dax_page(
 866	struct inode		*inode)
 867{
 868	struct xfs_inode        *ip = XFS_I(inode);
 869
 870	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 871	schedule();
 872	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 873}
 874
 875int
 876xfs_break_dax_layouts(
 877	struct inode		*inode,
 878	bool			*retry)
 879{
 880	struct page		*page;
 881
 882	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
 883
 884	page = dax_layout_busy_page(inode->i_mapping);
 885	if (!page)
 886		return 0;
 887
 888	*retry = true;
 889	return ___wait_var_event(&page->_refcount,
 890			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
 891			0, 0, xfs_wait_dax_page(inode));
 892}
 893
 894int
 895xfs_break_layouts(
 896	struct inode		*inode,
 897	uint			*iolock,
 898	enum layout_break_reason reason)
 899{
 900	bool			retry;
 901	int			error;
 902
 903	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
 904
 905	do {
 906		retry = false;
 907		switch (reason) {
 908		case BREAK_UNMAP:
 909			error = xfs_break_dax_layouts(inode, &retry);
 910			if (error || retry)
 911				break;
 912			fallthrough;
 913		case BREAK_WRITE:
 914			error = xfs_break_leased_layouts(inode, iolock, &retry);
 915			break;
 916		default:
 917			WARN_ON_ONCE(1);
 918			error = -EINVAL;
 919		}
 920	} while (error == 0 && retry);
 921
 922	return error;
 923}
 924
 925/* Does this file, inode, or mount want synchronous writes? */
 926static inline bool xfs_file_sync_writes(struct file *filp)
 927{
 928	struct xfs_inode	*ip = XFS_I(file_inode(filp));
 929
 930	if (xfs_has_wsync(ip->i_mount))
 931		return true;
 932	if (filp->f_flags & (__O_SYNC | O_DSYNC))
 933		return true;
 934	if (IS_SYNC(file_inode(filp)))
 935		return true;
 936
 937	return false;
 938}
 939
 940#define	XFS_FALLOC_FL_SUPPORTED						\
 941		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 942		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
 943		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
 944
 945STATIC long
 946xfs_file_fallocate(
 947	struct file		*file,
 948	int			mode,
 949	loff_t			offset,
 950	loff_t			len)
 951{
 952	struct inode		*inode = file_inode(file);
 953	struct xfs_inode	*ip = XFS_I(inode);
 954	long			error;
 955	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
 956	loff_t			new_size = 0;
 957	bool			do_file_insert = false;
 958
 959	if (!S_ISREG(inode->i_mode))
 960		return -EINVAL;
 961	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
 962		return -EOPNOTSUPP;
 963
 964	xfs_ilock(ip, iolock);
 965	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
 966	if (error)
 967		goto out_unlock;
 968
 969	/*
 970	 * Must wait for all AIO to complete before we continue as AIO can
 971	 * change the file size on completion without holding any locks we
 972	 * currently hold. We must do this first because AIO can update both
 973	 * the on disk and in memory inode sizes, and the operations that follow
 974	 * require the in-memory size to be fully up-to-date.
 975	 */
 976	inode_dio_wait(inode);
 977
 978	/*
  979	 * Now that AIO and DIO have drained, we flush and (if necessary) invalidate
 980	 * the cached range over the first operation we are about to run.
 981	 *
 982	 * We care about zero and collapse here because they both run a hole
 983	 * punch over the range first. Because that can zero data, and the range
 984	 * of invalidation for the shift operations is much larger, we still do
 985	 * the required flush for collapse in xfs_prepare_shift().
 986	 *
 987	 * Insert has the same range requirements as collapse, and we extend the
 988	 * file first which can zero data. Hence insert has the same
 989	 * flush/invalidate requirements as collapse and so they are both
 990	 * handled at the right time by xfs_prepare_shift().
 991	 */
 992	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
 993		    FALLOC_FL_COLLAPSE_RANGE)) {
 994		error = xfs_flush_unmap_range(ip, offset, len);
 995		if (error)
 996			goto out_unlock;
 997	}
 998
 999	error = file_modified(file);
1000	if (error)
1001		goto out_unlock;
1002
1003	if (mode & FALLOC_FL_PUNCH_HOLE) {
1004		error = xfs_free_file_space(ip, offset, len);
1005		if (error)
1006			goto out_unlock;
1007	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1008		if (!xfs_is_falloc_aligned(ip, offset, len)) {
1009			error = -EINVAL;
1010			goto out_unlock;
1011		}
1012
1013		/*
1014		 * There is no need to overlap collapse range with EOF,
1015		 * in which case it is effectively a truncate operation
1016		 */
1017		if (offset + len >= i_size_read(inode)) {
1018			error = -EINVAL;
1019			goto out_unlock;
1020		}
1021
1022		new_size = i_size_read(inode) - len;
1023
1024		error = xfs_collapse_file_space(ip, offset, len);
1025		if (error)
1026			goto out_unlock;
1027	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1028		loff_t		isize = i_size_read(inode);
1029
1030		if (!xfs_is_falloc_aligned(ip, offset, len)) {
1031			error = -EINVAL;
1032			goto out_unlock;
1033		}
1034
1035		/*
1036		 * New inode size must not exceed ->s_maxbytes, accounting for
1037		 * possible signed overflow.
1038		 */
1039		if (inode->i_sb->s_maxbytes - isize < len) {
1040			error = -EFBIG;
1041			goto out_unlock;
1042		}
1043		new_size = isize + len;
1044
1045		/* Offset should be less than i_size */
1046		if (offset >= isize) {
1047			error = -EINVAL;
1048			goto out_unlock;
1049		}
1050		do_file_insert = true;
1051	} else {
1052		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1053		    offset + len > i_size_read(inode)) {
1054			new_size = offset + len;
1055			error = inode_newsize_ok(inode, new_size);
1056			if (error)
1057				goto out_unlock;
1058		}
1059
1060		if (mode & FALLOC_FL_ZERO_RANGE) {
1061			/*
1062			 * Punch a hole and prealloc the range.  We use a hole
1063			 * punch rather than unwritten extent conversion for two
1064			 * reasons:
1065			 *
1066			 *   1.) Hole punch handles partial block zeroing for us.
1067			 *   2.) If prealloc returns ENOSPC, the file range is
1068			 *       still zero-valued by virtue of the hole punch.
1069			 */
1070			unsigned int blksize = i_blocksize(inode);
1071
1072			trace_xfs_zero_file_space(ip);
1073
1074			error = xfs_free_file_space(ip, offset, len);
1075			if (error)
1076				goto out_unlock;
1077
1078			len = round_up(offset + len, blksize) -
1079			      round_down(offset, blksize);
1080			offset = round_down(offset, blksize);
1081		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
1082			error = xfs_reflink_unshare(ip, offset, len);
1083			if (error)
1084				goto out_unlock;
1085		} else {
1086			/*
 1087			 * In always_cow mode we can't use preallocations and
1088			 * thus should not create them.
1089			 */
1090			if (xfs_is_always_cow_inode(ip)) {
1091				error = -EOPNOTSUPP;
1092				goto out_unlock;
1093			}
1094		}
1095
1096		if (!xfs_is_always_cow_inode(ip)) {
1097			error = xfs_alloc_file_space(ip, offset, len);
1098			if (error)
1099				goto out_unlock;
1100		}
1101	}
1102
1103	/* Change file size if needed */
1104	if (new_size) {
1105		struct iattr iattr;
1106
1107		iattr.ia_valid = ATTR_SIZE;
1108		iattr.ia_size = new_size;
1109		error = xfs_vn_setattr_size(file_mnt_idmap(file),
1110					    file_dentry(file), &iattr);
1111		if (error)
1112			goto out_unlock;
1113	}
1114
1115	/*
1116	 * Perform hole insertion now that the file size has been
1117	 * updated so that if we crash during the operation we don't
 1118	 * leave shifted extents past EOF and hence lose access to
1119	 * the data that is contained within them.
1120	 */
1121	if (do_file_insert) {
1122		error = xfs_insert_file_space(ip, offset, len);
1123		if (error)
1124			goto out_unlock;
1125	}
1126
1127	if (xfs_file_sync_writes(file))
1128		error = xfs_log_force_inode(ip);
1129
1130out_unlock:
1131	xfs_iunlock(ip, iolock);
1132	return error;
1133}
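
/*
 * A userspace sketch (not from the kernel source) of driving the
 * fallocate path above: PUNCH_HOLE must be paired with KEEP_SIZE, and
 * COLLAPSE_RANGE/INSERT_RANGE additionally need the allocation-unit
 * alignment checked by xfs_is_falloc_aligned().  The offsets are
 * hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>

int punch_hole_4k(int fd)
{
	/* free bytes 4096..12287 without changing the file size */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4096, 8192);
}
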
1134
1135STATIC int
1136xfs_file_fadvise(
1137	struct file	*file,
1138	loff_t		start,
1139	loff_t		end,
1140	int		advice)
1141{
1142	struct xfs_inode *ip = XFS_I(file_inode(file));
1143	int ret;
1144	int lockflags = 0;
1145
1146	/*
1147	 * Operations creating pages in page cache need protection from hole
1148	 * punching and similar ops
1149	 */
1150	if (advice == POSIX_FADV_WILLNEED) {
1151		lockflags = XFS_IOLOCK_SHARED;
1152		xfs_ilock(ip, lockflags);
1153	}
1154	ret = generic_fadvise(file, start, end, advice);
1155	if (lockflags)
1156		xfs_iunlock(ip, lockflags);
1157	return ret;
1158}
1159
1160STATIC loff_t
1161xfs_file_remap_range(
1162	struct file		*file_in,
1163	loff_t			pos_in,
1164	struct file		*file_out,
1165	loff_t			pos_out,
1166	loff_t			len,
1167	unsigned int		remap_flags)
1168{
1169	struct inode		*inode_in = file_inode(file_in);
1170	struct xfs_inode	*src = XFS_I(inode_in);
1171	struct inode		*inode_out = file_inode(file_out);
1172	struct xfs_inode	*dest = XFS_I(inode_out);
1173	struct xfs_mount	*mp = src->i_mount;
1174	loff_t			remapped = 0;
1175	xfs_extlen_t		cowextsize;
1176	int			ret;
1177
1178	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1179		return -EINVAL;
1180
1181	if (!xfs_has_reflink(mp))
1182		return -EOPNOTSUPP;
1183
1184	if (xfs_is_shutdown(mp))
1185		return -EIO;
1186
1187	/* Prepare and then clone file data. */
1188	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1189			&len, remap_flags);
1190	if (ret || len == 0)
1191		return ret;
1192
1193	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1194
1195	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1196			&remapped);
1197	if (ret)
1198		goto out_unlock;
1199
1200	/*
1201	 * Carry the cowextsize hint from src to dest if we're sharing the
1202	 * entire source file to the entire destination file, the source file
1203	 * has a cowextsize hint, and the destination file does not.
1204	 */
1205	cowextsize = 0;
1206	if (pos_in == 0 && len == i_size_read(inode_in) &&
1207	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1208	    pos_out == 0 && len >= i_size_read(inode_out) &&
1209	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1210		cowextsize = src->i_cowextsize;
1211
1212	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1213			remap_flags);
1214	if (ret)
1215		goto out_unlock;
1216
1217	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1218		xfs_log_force_inode(dest);
1219out_unlock:
1220	xfs_iunlock2_remapping(src, dest);
1221	if (ret)
1222		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1223	return remapped > 0 ? remapped : ret;
1224}
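
/*
 * A userspace sketch (not from the kernel source): the remap path above
 * is what serves reflink copies requested through the FICLONE and
 * FICLONERANGE ioctls.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>

int reflink_whole_file(int dest_fd, int src_fd)
{
	return ioctl(dest_fd, FICLONE, src_fd);	/* share all extents */
}
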
1225
1226STATIC int
1227xfs_file_open(
1228	struct inode	*inode,
1229	struct file	*file)
1230{
1231	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1232		return -EIO;
1233	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
1234			FMODE_DIO_PARALLEL_WRITE | FMODE_CAN_ODIRECT;
1235	return generic_file_open(inode, file);
1236}
1237
1238STATIC int
1239xfs_dir_open(
1240	struct inode	*inode,
1241	struct file	*file)
1242{
1243	struct xfs_inode *ip = XFS_I(inode);
1244	unsigned int	mode;
1245	int		error;
1246
1247	error = xfs_file_open(inode, file);
1248	if (error)
1249		return error;
1250
1251	/*
1252	 * If there are any blocks, read-ahead block 0 as we're almost
1253	 * certain to have the next operation be a read there.
1254	 */
1255	mode = xfs_ilock_data_map_shared(ip);
1256	if (ip->i_df.if_nextents > 0)
1257		error = xfs_dir3_data_readahead(ip, 0, 0);
1258	xfs_iunlock(ip, mode);
1259	return error;
1260}
1261
1262STATIC int
1263xfs_file_release(
1264	struct inode	*inode,
1265	struct file	*filp)
1266{
1267	return xfs_release(XFS_I(inode));
1268}
1269
1270STATIC int
1271xfs_file_readdir(
1272	struct file	*file,
1273	struct dir_context *ctx)
1274{
1275	struct inode	*inode = file_inode(file);
1276	xfs_inode_t	*ip = XFS_I(inode);
1277	size_t		bufsize;
1278
1279	/*
 1280	 * The Linux API doesn't pass the total size of the buffer
 1281	 * we read into down to the filesystem.  With the filldir concept
 1282	 * it's not needed for correct information, but the XFS dir2 leaf
 1283	 * code wants an estimate of the buffer size to calculate its
 1284	 * readahead window and size the buffers used for mapping to
1285	 * physical blocks.
1286	 *
1287	 * Try to give it an estimate that's good enough, maybe at some
1288	 * point we can change the ->readdir prototype to include the
1289	 * buffer size.  For now we use the current glibc buffer size.
1290	 */
1291	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1292
1293	return xfs_readdir(NULL, ip, ctx, bufsize);
1294}
1295
1296STATIC loff_t
1297xfs_file_llseek(
1298	struct file	*file,
1299	loff_t		offset,
1300	int		whence)
1301{
1302	struct inode		*inode = file->f_mapping->host;
1303
1304	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1305		return -EIO;
1306
1307	switch (whence) {
1308	default:
1309		return generic_file_llseek(file, offset, whence);
1310	case SEEK_HOLE:
1311		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1312		break;
1313	case SEEK_DATA:
1314		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1315		break;
1316	}
1317
1318	if (offset < 0)
1319		return offset;
1320	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1321}
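
/*
 * A userspace sketch (not from the kernel source): walking a sparse
 * file's data extents with the SEEK_DATA/SEEK_HOLE support above.
 * lseek() fails with ENXIO once no further data exists, ending the
 * loop.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

void print_data_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: %lld..%lld\n",
		       (long long)data, (long long)hole - 1);
		data = hole;
	}
}
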
1322
1323#ifdef CONFIG_FS_DAX
1324static inline vm_fault_t
1325xfs_dax_fault(
1326	struct vm_fault		*vmf,
1327	unsigned int		order,
1328	bool			write_fault,
1329	pfn_t			*pfn)
1330{
1331	return dax_iomap_fault(vmf, order, pfn, NULL,
1332			(write_fault && !vmf->cow_page) ?
1333				&xfs_dax_write_iomap_ops :
1334				&xfs_read_iomap_ops);
1335}
1336#else
1337static inline vm_fault_t
1338xfs_dax_fault(
1339	struct vm_fault		*vmf,
1340	unsigned int		order,
1341	bool			write_fault,
1342	pfn_t			*pfn)
1343{
1344	ASSERT(0);
1345	return VM_FAULT_SIGBUS;
1346}
1347#endif
1348
1349/*
1350 * Locking for serialisation of IO during page faults. This results in a lock
1351 * ordering of:
1352 *
1353 * mmap_lock (MM)
1354 *   sb_start_pagefault(vfs, freeze)
1355 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1356 *       page_lock (MM)
1357 *         i_lock (XFS - extent map serialisation)
1358 */
1359static vm_fault_t
1360__xfs_filemap_fault(
1361	struct vm_fault		*vmf,
1362	unsigned int		order,
1363	bool			write_fault)
1364{
1365	struct inode		*inode = file_inode(vmf->vma->vm_file);
1366	struct xfs_inode	*ip = XFS_I(inode);
1367	vm_fault_t		ret;
1368	unsigned int		lock_mode = 0;
1369
1370	trace_xfs_filemap_fault(ip, order, write_fault);
1371
1372	if (write_fault) {
1373		sb_start_pagefault(inode->i_sb);
1374		file_update_time(vmf->vma->vm_file);
1375	}
1376
1377	if (IS_DAX(inode) || write_fault)
1378		lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
1379
1380	if (IS_DAX(inode)) {
1381		pfn_t pfn;
1382
1383		ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
1384		if (ret & VM_FAULT_NEEDDSYNC)
1385			ret = dax_finish_sync_fault(vmf, order, pfn);
1386	} else if (write_fault) {
1387		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
1388	} else {
1389		ret = filemap_fault(vmf);
1390	}
1391
1392	if (lock_mode)
1393		xfs_iunlock(XFS_I(inode), lock_mode);
1394
1395	if (write_fault)
1396		sb_end_pagefault(inode->i_sb);
1397	return ret;
1398}
1399
1400static inline bool
1401xfs_is_write_fault(
1402	struct vm_fault		*vmf)
1403{
1404	return (vmf->flags & FAULT_FLAG_WRITE) &&
1405	       (vmf->vma->vm_flags & VM_SHARED);
1406}
1407
1408static vm_fault_t
1409xfs_filemap_fault(
1410	struct vm_fault		*vmf)
1411{
1412	/* DAX can shortcut the normal fault path on write faults! */
1413	return __xfs_filemap_fault(vmf, 0,
1414			IS_DAX(file_inode(vmf->vma->vm_file)) &&
1415			xfs_is_write_fault(vmf));
1416}
1417
1418static vm_fault_t
1419xfs_filemap_huge_fault(
1420	struct vm_fault		*vmf,
1421	unsigned int		order)
1422{
1423	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1424		return VM_FAULT_FALLBACK;
1425
1426	/* DAX can shortcut the normal fault path on write faults! */
1427	return __xfs_filemap_fault(vmf, order,
1428			xfs_is_write_fault(vmf));
1429}
1430
1431static vm_fault_t
1432xfs_filemap_page_mkwrite(
1433	struct vm_fault		*vmf)
1434{
1435	return __xfs_filemap_fault(vmf, 0, true);
1436}
1437
1438/*
1439 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1440 * on write faults. In reality, it needs to serialise against truncate and
 1441 * prepare memory for writing so handle it as a standard write fault.
1442 */
1443static vm_fault_t
1444xfs_filemap_pfn_mkwrite(
1445	struct vm_fault		*vmf)
1446{
1447
1448	return __xfs_filemap_fault(vmf, 0, true);
1449}
1450
1451static const struct vm_operations_struct xfs_file_vm_ops = {
1452	.fault		= xfs_filemap_fault,
1453	.huge_fault	= xfs_filemap_huge_fault,
1454	.map_pages	= filemap_map_pages,
1455	.page_mkwrite	= xfs_filemap_page_mkwrite,
1456	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1457};
1458
1459STATIC int
1460xfs_file_mmap(
1461	struct file		*file,
1462	struct vm_area_struct	*vma)
1463{
1464	struct inode		*inode = file_inode(file);
1465	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1466
1467	/*
1468	 * We don't support synchronous mappings for non-DAX files and
 1469	 * for DAX files if the underlying dax_device is not synchronous.
1470	 */
1471	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1472		return -EOPNOTSUPP;
1473
1474	file_accessed(file);
1475	vma->vm_ops = &xfs_file_vm_ops;
1476	if (IS_DAX(inode))
1477		vm_flags_set(vma, VM_HUGEPAGE);
1478	return 0;
1479}
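
/*
 * A userspace sketch (not from the kernel source, and assuming a libc
 * that exposes MAP_SYNC): requesting a synchronous DAX mapping.  The
 * daxdev_mapping_supported() check above rejects MAP_SYNC unless the
 * backing device can honour it.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

void *map_sync_rw(int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}
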
1480
1481const struct file_operations xfs_file_operations = {
1482	.llseek		= xfs_file_llseek,
1483	.read_iter	= xfs_file_read_iter,
1484	.write_iter	= xfs_file_write_iter,
1485	.splice_read	= xfs_file_splice_read,
1486	.splice_write	= iter_file_splice_write,
1487	.iopoll		= iocb_bio_iopoll,
1488	.unlocked_ioctl	= xfs_file_ioctl,
1489#ifdef CONFIG_COMPAT
1490	.compat_ioctl	= xfs_file_compat_ioctl,
1491#endif
1492	.mmap		= xfs_file_mmap,
1493	.mmap_supported_flags = MAP_SYNC,
1494	.open		= xfs_file_open,
1495	.release	= xfs_file_release,
1496	.fsync		= xfs_file_fsync,
1497	.get_unmapped_area = thp_get_unmapped_area,
1498	.fallocate	= xfs_file_fallocate,
1499	.fadvise	= xfs_file_fadvise,
1500	.remap_file_range = xfs_file_remap_range,
1501};
1502
1503const struct file_operations xfs_dir_file_operations = {
1504	.open		= xfs_dir_open,
1505	.read		= generic_read_dir,
1506	.iterate_shared	= xfs_file_readdir,
1507	.llseek		= generic_file_llseek,
1508	.unlocked_ioctl	= xfs_file_ioctl,
1509#ifdef CONFIG_COMPAT
1510	.compat_ioctl	= xfs_file_compat_ioctl,
1511#endif
1512	.fsync		= xfs_dir_fsync,
1513};
v4.10.11
 
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_mount.h"
  25#include "xfs_da_format.h"
  26#include "xfs_da_btree.h"
  27#include "xfs_inode.h"
  28#include "xfs_trans.h"
  29#include "xfs_inode_item.h"
  30#include "xfs_bmap.h"
  31#include "xfs_bmap_util.h"
  32#include "xfs_error.h"
  33#include "xfs_dir2.h"
  34#include "xfs_dir2_priv.h"
  35#include "xfs_ioctl.h"
  36#include "xfs_trace.h"
  37#include "xfs_log.h"
  38#include "xfs_icache.h"
  39#include "xfs_pnfs.h"
  40#include "xfs_iomap.h"
  41#include "xfs_reflink.h"
  42
  43#include <linux/dcache.h>
  44#include <linux/falloc.h>
  45#include <linux/pagevec.h>
  46#include <linux/backing-dev.h>
 
 
 
  47
  48static const struct vm_operations_struct xfs_file_vm_ops;
  49
  50/*
  51 * Clear the specified ranges to zero through either the pagecache or DAX.
  52 * Holes and unwritten extents will be left as-is as they already are zeroed.
  53 */
  54int
  55xfs_zero_range(
  56	struct xfs_inode	*ip,
  57	xfs_off_t		pos,
  58	xfs_off_t		count,
  59	bool			*did_zero)
  60{
  61	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
  62}
  63
  64int
  65xfs_update_prealloc_flags(
  66	struct xfs_inode	*ip,
  67	enum xfs_prealloc_flags	flags)
  68{
  69	struct xfs_trans	*tp;
  70	int			error;
 
 
 
 
 
 
 
 
 
  71
  72	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
  73			0, 0, 0, &tp);
  74	if (error)
  75		return error;
  76
  77	xfs_ilock(ip, XFS_ILOCK_EXCL);
  78	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  79
  80	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
  81		VFS_I(ip)->i_mode &= ~S_ISUID;
  82		if (VFS_I(ip)->i_mode & S_IXGRP)
  83			VFS_I(ip)->i_mode &= ~S_ISGID;
  84		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
  85	}
  86
  87	if (flags & XFS_PREALLOC_SET)
  88		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
  89	if (flags & XFS_PREALLOC_CLEAR)
  90		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
  91
  92	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  93	if (flags & XFS_PREALLOC_SYNC)
  94		xfs_trans_set_sync(tp);
  95	return xfs_trans_commit(tp);
  96}
  97
  98/*
  99 * Fsync operations on directories are much simpler than on regular files,
 100 * as there is no file data to flush, and thus also no need for explicit
 101 * cache flush operations, and there are no non-transaction metadata updates
 102 * on directories either.
 103 */
 104STATIC int
 105xfs_dir_fsync(
 106	struct file		*file,
 107	loff_t			start,
 108	loff_t			end,
 109	int			datasync)
 110{
 111	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
 112	struct xfs_mount	*mp = ip->i_mount;
 113	xfs_lsn_t		lsn = 0;
 114
 115	trace_xfs_dir_fsync(ip);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116
 117	xfs_ilock(ip, XFS_ILOCK_SHARED);
 118	if (xfs_ipincount(ip))
 119		lsn = ip->i_itemp->ili_last_lsn;
 
 
 
 
 
 
 
 120	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 121
 122	if (!lsn)
 123		return 0;
 124	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 125}
 126
 127STATIC int
 128xfs_file_fsync(
 129	struct file		*file,
 130	loff_t			start,
 131	loff_t			end,
 132	int			datasync)
 133{
 134	struct inode		*inode = file->f_mapping->host;
 135	struct xfs_inode	*ip = XFS_I(inode);
 136	struct xfs_mount	*mp = ip->i_mount;
 137	int			error = 0;
 138	int			log_flushed = 0;
 139	xfs_lsn_t		lsn = 0;
 140
 141	trace_xfs_file_fsync(ip);
 142
 143	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
 144	if (error)
 145		return error;
 146
 147	if (XFS_FORCED_SHUTDOWN(mp))
 148		return -EIO;
 149
 150	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 151
 152	/*
 153	 * If we have an RT and/or log subvolume we need to make sure to flush
 154	 * the write cache the device used for file data first.  This is to
 155	 * ensure newly written file data make it to disk before logging the new
 156	 * inode size in case of an extending write.
 157	 */
 158	if (XFS_IS_REALTIME_INODE(ip))
 159		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
 160	else if (mp->m_logdev_targp != mp->m_ddev_targp)
 161		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 162
 163	/*
 164	 * All metadata updates are logged, which means that we just have to
 165	 * flush the log up to the latest LSN that touched the inode. If we have
 166	 * concurrent fsync/fdatasync() calls, we need them to all block on the
 167	 * log force before we clear the ili_fsync_fields field. This ensures
 168	 * that we don't get a racing sync operation that does not wait for the
 169	 * metadata to hit the journal before returning. If we race with
 170	 * clearing the ili_fsync_fields, then all that will happen is the log
 171	 * force will do nothing as the lsn will already be on disk. We can't
 172	 * race with setting ili_fsync_fields because that is done under
 173	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
 174	 * until after the ili_fsync_fields is cleared.
 175	 */
 176	xfs_ilock(ip, XFS_ILOCK_SHARED);
 177	if (xfs_ipincount(ip)) {
 178		if (!datasync ||
 179		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
 180			lsn = ip->i_itemp->ili_last_lsn;
 181	}
 182
 183	if (lsn) {
 184		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
 185		ip->i_itemp->ili_fsync_fields = 0;
 186	}
 187	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 188
 189	/*
 190	 * If we only have a single device, and the log force about was
 191	 * a no-op we might have to flush the data device cache here.
 192	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
 193	 * an already allocated file and thus do not have any metadata to
 194	 * commit.
 195	 */
 196	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
 197	    mp->m_logdev_targp == mp->m_ddev_targp)
 198		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
 
 
 199
 200	return error;
 201}
 202
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 203STATIC ssize_t
 204xfs_file_dio_aio_read(
 205	struct kiocb		*iocb,
 206	struct iov_iter		*to)
 207{
 208	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 209	size_t			count = iov_iter_count(to);
 210	ssize_t			ret;
 211
 212	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
 213
 214	if (!count)
 215		return 0; /* skip atime */
 216
 217	file_accessed(iocb->ki_filp);
 218
 219	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 220	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
 
 
 221	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 222
 223	return ret;
 224}
 225
 226static noinline ssize_t
 227xfs_file_dax_read(
 228	struct kiocb		*iocb,
 229	struct iov_iter		*to)
 230{
 231	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 232	size_t			count = iov_iter_count(to);
 233	ssize_t			ret = 0;
 234
 235	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
 236
 237	if (!count)
 238		return 0; /* skip atime */
 239
 240	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 241	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
 
 
 242	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 243
 244	file_accessed(iocb->ki_filp);
 245	return ret;
 246}
 247
 248STATIC ssize_t
 249xfs_file_buffered_aio_read(
 250	struct kiocb		*iocb,
 251	struct iov_iter		*to)
 252{
 253	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 254	ssize_t			ret;
 255
 256	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 257
 258	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 
 259	ret = generic_file_read_iter(iocb, to);
 260	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 261
 262	return ret;
 263}
 264
 265STATIC ssize_t
 266xfs_file_read_iter(
 267	struct kiocb		*iocb,
 268	struct iov_iter		*to)
 269{
 270	struct inode		*inode = file_inode(iocb->ki_filp);
 271	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
 272	ssize_t			ret = 0;
 273
 274	XFS_STATS_INC(mp, xs_read_calls);
 275
 276	if (XFS_FORCED_SHUTDOWN(mp))
 277		return -EIO;
 278
 279	if (IS_DAX(inode))
 280		ret = xfs_file_dax_read(iocb, to);
 281	else if (iocb->ki_flags & IOCB_DIRECT)
 282		ret = xfs_file_dio_aio_read(iocb, to);
 283	else
 284		ret = xfs_file_buffered_aio_read(iocb, to);
 285
 286	if (ret > 0)
 287		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 288	return ret;
 289}
 290
 291/*
 292 * Zero any on disk space between the current EOF and the new, larger EOF.
 293 *
 294 * This handles the normal case of zeroing the remainder of the last block in
 295 * the file and the unusual case of zeroing blocks out beyond the size of the
 296 * file.  This second case only happens with fixed size extents and when the
 297 * system crashes before the inode size was updated but after blocks were
 298 * allocated.
 299 *
 300 * Expects the iolock to be held exclusive, and will take the ilock internally.
 301 */
 302int					/* error (positive) */
 303xfs_zero_eof(
 304	struct xfs_inode	*ip,
 305	xfs_off_t		offset,		/* starting I/O offset */
 306	xfs_fsize_t		isize,		/* current inode size */
 307	bool			*did_zeroing)
 308{
 309	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 310	ASSERT(offset > isize);
 
 
 311
 312	trace_xfs_zero_eof(ip, isize, offset - isize);
 313	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
 
 
 
 
 
 
 
 
 
 
 
 314}
 315
 316/*
 317 * Common pre-write limit and setup checks.
 318 *
 319 * Called with the iolocked held either shared and exclusive according to
 320 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 321 * if called for a direct write beyond i_size.
 322 */
 323STATIC ssize_t
 324xfs_file_aio_write_checks(
 325	struct kiocb		*iocb,
 326	struct iov_iter		*from,
 327	int			*iolock)
 328{
 329	struct file		*file = iocb->ki_filp;
 330	struct inode		*inode = file->f_mapping->host;
 331	struct xfs_inode	*ip = XFS_I(inode);
 332	ssize_t			error = 0;
 333	size_t			count = iov_iter_count(from);
 334	bool			drained_dio = false;
 
 335
 336restart:
 337	error = generic_write_checks(iocb, from);
 338	if (error <= 0)
 339		return error;
 340
 341	error = xfs_break_layouts(inode, iolock);
 
 
 
 
 
 
 
 342	if (error)
 343		return error;
 344
 345	/*
 346	 * For changing security info in file_remove_privs() we need i_rwsem
 347	 * exclusively.
 348	 */
 349	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
 350		xfs_iunlock(ip, *iolock);
 351		*iolock = XFS_IOLOCK_EXCL;
 352		xfs_ilock(ip, *iolock);
 353		goto restart;
 354	}
 355	/*
 356	 * If the offset is beyond the size of the file, we need to zero any
 357	 * blocks that fall between the existing EOF and the start of this
 358	 * write.  If zeroing is needed and we are currently holding the
 359	 * iolock shared, we need to update it to exclusive which implies
 360	 * having to redo all the checks made so far.
 361	 *
 362	 * We need to serialise against EOF updates that occur in IO
 363	 * completions here. We want to make sure that nobody is changing the
 364	 * size while we do this check until we have placed an IO barrier (i.e.
 365	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
 366	 * The spinlock effectively forms a memory barrier once we have the
 367	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
 368	 * and hence be able to correctly determine if we need to run zeroing.
 369	 */
 370	spin_lock(&ip->i_flags_lock);
 371	if (iocb->ki_pos > i_size_read(inode)) {
 372		bool	zero = false;
 373
 374		spin_unlock(&ip->i_flags_lock);
 375		if (!drained_dio) {
 376			if (*iolock == XFS_IOLOCK_SHARED) {
 377				xfs_iunlock(ip, *iolock);
 378				*iolock = XFS_IOLOCK_EXCL;
 379				xfs_ilock(ip, *iolock);
 380				iov_iter_reexpand(from, count);
 381			}
 382			/*
 383			 * We now have an IO submission barrier in place, but
 384			 * AIO can do EOF updates during IO completion and hence
 385			 * we now need to wait for all of them to drain. Non-AIO
 386			 * DIO will have drained before we are given the
 387			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
 388			 * no-op.
 389			 */
 390			inode_dio_wait(inode);
 391			drained_dio = true;
 392			goto restart;
 393		}
 394		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
 395		if (error)
 396			return error;
 397	} else
 398		spin_unlock(&ip->i_flags_lock);
 399
 400	/*
 401	 * Updating the timestamps will grab the ilock again from
 402	 * xfs_fs_dirty_inode, so we have to call it after dropping the
 403	 * lock above.  Eventually we should look into a way to avoid
 404	 * the pointless lock roundtrip.
 405	 */
 406	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
 407		error = file_update_time(file);
 408		if (error)
 409			return error;
 410	}
 411
 412	/*
 413	 * If we're writing the file then make sure to clear the setuid and
 414	 * setgid bits if the process is not being run by root.  This keeps
 415	 * people from modifying setuid and setgid binaries.
 416	 */
 417	if (!IS_NOSEC(inode))
 418		return file_remove_privs(file);
 419	return 0;
 420}
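
/*
 * The file_remove_privs() call above is what makes an ordinary write strip
 * the setuid/setgid bits, so unprivileged writes cannot smuggle data into
 * a privileged binary.  A sketch of observing this from userspace,
 * assuming an unprivileged process that owns the file:
 *
 *	#include <assert.h>
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	struct stat st;
 *	int fd = open("testfile", O_WRONLY);
 *
 *	fchmod(fd, 04755);			// set S_ISUID
 *	write(fd, "data", 4);			// plain write to the file
 *	fstat(fd, &st);
 *	assert(!(st.st_mode & S_ISUID));	// setuid bit was cleared
 */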
 421
 422static int
 423xfs_dio_write_end_io(
 424	struct kiocb		*iocb,
 425	ssize_t			size,
 426	unsigned		flags)
 427{
 428	struct inode		*inode = file_inode(iocb->ki_filp);
 429	struct xfs_inode	*ip = XFS_I(inode);
 430	loff_t			offset = iocb->ki_pos;
 431	bool			update_size = false;
 432	int			error = 0;
 433
 434	trace_xfs_end_io_direct_write(ip, offset, size);
 435
 436	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 437		return -EIO;
 438
 439	if (size <= 0)
 440		return size;
 441
 442	/*
 443	 * We need to update the in-core inode size here so that we don't end up
 444	 * with the on-disk inode size being outside the in-core inode size. We
 445	 * have no other method of updating EOF for AIO, so always do it here
 446	 * if necessary.
 447	 *
 448	 * We need to lock the test/set EOF update as we can be racing with
 449	 * other IO completions here to update the EOF. Failing to serialise
 450	 * here can result in EOF moving backwards and Bad Things Happen when
 451	 * that occurs.
 452	 */
 453	spin_lock(&ip->i_flags_lock);
 454	if (offset + size > i_size_read(inode)) {
 455		i_size_write(inode, offset + size);
 456		update_size = true;
 457	}
 458	spin_unlock(&ip->i_flags_lock);
 459
 460	if (flags & IOMAP_DIO_COW) {
 461		error = xfs_reflink_end_cow(ip, offset, size);
 462		if (error)
 463			return error;
 464	}
 465
 466	if (flags & IOMAP_DIO_UNWRITTEN)
 467		error = xfs_iomap_write_unwritten(ip, offset, size);
 468	else if (update_size)
 469		error = xfs_setfilesize(ip, offset, size);
 470
 471	return error;
 472}
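
/*
 * The in-core size update above is what allows an AIO direct write to
 * extend the file: by the time io_getevents() reports the completion, the
 * new EOF is visible.  A sketch using libaio (link with -laio; fd opened
 * with O_DIRECT, file_size, and 4096-byte sector alignment are all
 * assumptions here):
 *
 *	#include <libaio.h>
 *	#include <stdlib.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event ev;
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	io_setup(8, &ctx);
 *	io_prep_pwrite(&cb, fd, buf, 4096, file_size);	// write at old EOF
 *	io_submit(ctx, 1, cbs);
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// end_io has updated i_size
 *	io_destroy(ctx);
 */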
 473
 474/*
 475 * xfs_file_dio_aio_write - handle direct IO writes
 476 *
 477 * Lock the inode appropriately to prepare for and issue a direct IO write.
 478 * By separating it from the buffered write path we remove all the
 479 * tricky-to-follow locking changes and looping.
 480 *
 481 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 482 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 483 * pages are flushed out.
 484 *
 485 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 486 * allowing them to be done in parallel with reads and other direct IO writes.
 487 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 488 * needs to do sub-block zeroing and that requires serialisation against other
 489 * direct IOs to the same block. In this case we need to serialise the
 490 * submission of the unaligned IOs so that we don't get racing block zeroing in
 491 * the dio layer.  To avoid the problem with aio, we also need to wait for
 492 * outstanding IOs to complete so that unwritten extent conversion is completed
 493 * before we try to map the overlapping block. This is currently implemented by
 494 * hitting it with a big hammer (i.e. inode_dio_wait()).
 495 *
 496 * Returns with locks held indicated by @iolock and errors indicated by
 497 * negative return values.
 498 */
 499STATIC ssize_t
 500xfs_file_dio_aio_write(
 501	struct kiocb		*iocb,
 502	struct iov_iter		*from)
 503{
 504	struct file		*file = iocb->ki_filp;
 505	struct address_space	*mapping = file->f_mapping;
 506	struct inode		*inode = mapping->host;
 507	struct xfs_inode	*ip = XFS_I(inode);
 508	struct xfs_mount	*mp = ip->i_mount;
 509	ssize_t			ret = 0;
 510	int			unaligned_io = 0;
 511	int			iolock;
 512	size_t			count = iov_iter_count(from);
 513	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
 514					mp->m_rtdev_targp : mp->m_ddev_targp;
 515
 516	/* DIO must be aligned to device logical sector size */
 517	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 518		return -EINVAL;
 519
 520	/*
 521	 * Don't take the exclusive iolock here unless the I/O is unaligned to
 522	 * the file system block size.  We don't need to consider the EOF
 523	 * extension case here because xfs_file_aio_write_checks() will relock
 524	 * the inode as necessary for EOF zeroing cases and fill out the new
 525	 * inode size as appropriate.
 526	 */
 527	if ((iocb->ki_pos & mp->m_blockmask) ||
 528	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
 529		unaligned_io = 1;
 530
 531		/*
 532		 * We can't properly handle unaligned direct I/O to reflink
 533		 * files yet, as we can't unshare a partial block.
 534		 */
 535		if (xfs_is_reflink_inode(ip)) {
 536			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
 537			return -EREMCHG;
 538		}
 539		iolock = XFS_IOLOCK_EXCL;
 540	} else {
 541		iolock = XFS_IOLOCK_SHARED;
 542	}
 543
 544	xfs_ilock(ip, iolock);
 545
 546	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 547	if (ret)
 548		goto out;
 549	count = iov_iter_count(from);
 550
 551	/*
 552	 * If we are doing unaligned IO, wait for all other IO to drain,
 553	 * otherwise demote the lock if we had to take the exclusive lock
 554	 * for other reasons in xfs_file_aio_write_checks.
 555	 */
 556	if (unaligned_io)
 557		inode_dio_wait(inode);
 558	else if (iolock == XFS_IOLOCK_EXCL) {
 559		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 560		iolock = XFS_IOLOCK_SHARED;
 561	}
 562
 563	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
 564
 565	/* If this is a block-aligned directio CoW, remap immediately. */
 566	if (xfs_is_reflink_inode(ip) && !unaligned_io) {
 567		ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
 568		if (ret)
 569			goto out;
 570	}
 571
 572	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
 573out:
 574	xfs_iunlock(ip, iolock);
 575
 576	/*
 577	 * No fallback to buffered IO on errors for XFS; direct IO will either
 578	 * complete fully or fail.
 579	 */
 580	ASSERT(ret < 0 || ret == count);
 581	return ret;
 582}
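
/*
 * The sector alignment check at the top of this function is directly
 * observable from userspace: a direct write whose offset or length is not
 * a multiple of the device's logical sector size fails with EINVAL before
 * any data is transferred.  A sketch assuming 512-byte logical sectors:
 *
 *	#include <assert.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void *buf;
 *	int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *	posix_memalign(&buf, 512, 512);
 *	assert(pwrite(fd, buf, 512, 0) == 512);	// aligned: succeeds
 *	assert(pwrite(fd, buf, 100, 3) == -1);	// unaligned: rejected
 *	assert(errno == EINVAL);
 */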
 583
 584static noinline ssize_t
 585xfs_file_dax_write(
 586	struct kiocb		*iocb,
 587	struct iov_iter		*from)
 588{
 589	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 590	struct xfs_inode	*ip = XFS_I(inode);
 591	int			iolock = XFS_IOLOCK_EXCL;
 592	ssize_t			ret, error = 0;
 593	size_t			count;
 594	loff_t			pos;
 595
 596	xfs_ilock(ip, iolock);
 597	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 598	if (ret)
 599		goto out;
 600
 601	pos = iocb->ki_pos;
 602	count = iov_iter_count(from);
 603
 604	trace_xfs_file_dax_write(ip, count, pos);
 605	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 606	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 607		i_size_write(inode, iocb->ki_pos);
 608		error = xfs_setfilesize(ip, pos, ret);
 609	}
 610out:
 611	xfs_iunlock(ip, iolock);
 612	return error ? error : ret;
 613}
 614
 615STATIC ssize_t
 616xfs_file_buffered_aio_write(
 617	struct kiocb		*iocb,
 618	struct iov_iter		*from)
 619{
 620	struct file		*file = iocb->ki_filp;
 621	struct address_space	*mapping = file->f_mapping;
 622	struct inode		*inode = mapping->host;
 623	struct xfs_inode	*ip = XFS_I(inode);
 624	ssize_t			ret;
 625	int			enospc = 0;
 626	int			iolock;
 627
 628write_retry:
 629	iolock = XFS_IOLOCK_EXCL;
 630	xfs_ilock(ip, iolock);
 631
 632	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 633	if (ret)
 634		goto out;
 635
 636	/* We can write back this queue in page reclaim */
 637	current->backing_dev_info = inode_to_bdi(inode);
 638
 639	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
 640	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 641	if (likely(ret >= 0))
 642		iocb->ki_pos += ret;
 643
 644	/*
 645	 * If we hit a space limit, try to free up some lingering preallocated
 646	 * space before returning an error. In the case of ENOSPC, first try to
 647	 * write back all dirty inodes to free up some of the excess reserved
 648	 * metadata space. This reduces the chances that the eofblocks scan
 649	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
 650	 * also behaves as a filter to prevent too many eofblocks scans from
 651	 * running at the same time.
 652	 */
 653	if (ret == -EDQUOT && !enospc) {
 654		xfs_iunlock(ip, iolock);
 655		enospc = xfs_inode_free_quota_eofblocks(ip);
 656		if (enospc)
 657			goto write_retry;
 658		enospc = xfs_inode_free_quota_cowblocks(ip);
 659		if (enospc)
 660			goto write_retry;
 661		iolock = 0;
 662	} else if (ret == -ENOSPC && !enospc) {
 663		struct xfs_eofblocks eofb = {0};
 664
 665		enospc = 1;
 666		xfs_flush_inodes(ip->i_mount);
 667
 668		xfs_iunlock(ip, iolock);
 669		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 670		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
 671		goto write_retry;
 672	}
 673
 674	current->backing_dev_info = NULL;
 675out:
 676	if (iolock)
 677		xfs_iunlock(ip, iolock);
 678	return ret;
 679}
 680
 681STATIC ssize_t
 682xfs_file_write_iter(
 683	struct kiocb		*iocb,
 684	struct iov_iter		*from)
 685{
 686	struct file		*file = iocb->ki_filp;
 687	struct address_space	*mapping = file->f_mapping;
 688	struct inode		*inode = mapping->host;
 689	struct xfs_inode	*ip = XFS_I(inode);
 690	ssize_t			ret;
 691	size_t			ocount = iov_iter_count(from);
 692
 693	XFS_STATS_INC(ip->i_mount, xs_write_calls);
 694
 695	if (ocount == 0)
 696		return 0;
 697
 698	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 699		return -EIO;
 700
 701	if (IS_DAX(inode))
 702		ret = xfs_file_dax_write(iocb, from);
 703	else if (iocb->ki_flags & IOCB_DIRECT) {
 704		/*
 705		 * Allow a directio write to fall back to a buffered
 706		 * write *only* in the case that we're doing a reflink
 707		 * CoW.  In all other directio scenarios we do not
 708		 * allow an operation to fall back to buffered mode.
 709		 */
 710		ret = xfs_file_dio_aio_write(iocb, from);
 711		if (ret == -EREMCHG)
 712			goto buffered;
 713	} else {
 714buffered:
 715		ret = xfs_file_buffered_aio_write(iocb, from);
 716	}
 717
 718	if (ret > 0) {
 719		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 720
 721		/* Handle various SYNC-type writes */
 722		ret = generic_write_sync(iocb, ret);
 723	}
 724	return ret;
 725}
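
/*
 * The generic_write_sync() call above is what gives O_SYNC/O_DSYNC their
 * semantics: when the file is opened with one of those flags, each write
 * returns only after the data (and, for O_SYNC, the metadata) has been
 * forced to stable storage, with no separate fsync() required.  A sketch
 * (record/record_len assumed from context):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("journal.log",
 *		      O_WRONLY | O_CREAT | O_APPEND | O_DSYNC, 0644);
 *
 *	// durable on return, as if each write were followed by fdatasync()
 *	write(fd, record, record_len);
 */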
 726
 727#define	XFS_FALLOC_FL_SUPPORTED						\
 728		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 729		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
 730		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
 731
 732STATIC long
 733xfs_file_fallocate(
 734	struct file		*file,
 735	int			mode,
 736	loff_t			offset,
 737	loff_t			len)
 738{
 739	struct inode		*inode = file_inode(file);
 740	struct xfs_inode	*ip = XFS_I(inode);
 741	long			error;
 742	enum xfs_prealloc_flags	flags = 0;
 743	uint			iolock = XFS_IOLOCK_EXCL;
 744	loff_t			new_size = 0;
 745	bool			do_file_insert = false;
 746
 747	if (!S_ISREG(inode->i_mode))
 748		return -EINVAL;
 749	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
 750		return -EOPNOTSUPP;
 751
 752	xfs_ilock(ip, iolock);
 753	error = xfs_break_layouts(inode, &iolock);
 754	if (error)
 755		goto out_unlock;
 756
 757	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 758	iolock |= XFS_MMAPLOCK_EXCL;
 759
 760	if (mode & FALLOC_FL_PUNCH_HOLE) {
 761		error = xfs_free_file_space(ip, offset, len);
 762		if (error)
 763			goto out_unlock;
 764	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
 765		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 766
 767		if (offset & blksize_mask || len & blksize_mask) {
 768			error = -EINVAL;
 769			goto out_unlock;
 770		}
 771
 772		/*
 773		 * There is no need for a collapse range to overlap EOF;
 774		 * in that case it is effectively a truncate operation
 775		 */
 776		if (offset + len >= i_size_read(inode)) {
 777			error = -EINVAL;
 778			goto out_unlock;
 779		}
 780
 781		new_size = i_size_read(inode) - len;
 782
 783		error = xfs_collapse_file_space(ip, offset, len);
 784		if (error)
 785			goto out_unlock;
 786	} else if (mode & FALLOC_FL_INSERT_RANGE) {
 787		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 788
 789		new_size = i_size_read(inode) + len;
 790		if (offset & blksize_mask || len & blksize_mask) {
 791			error = -EINVAL;
 792			goto out_unlock;
 793		}
 794
 795		/* check the new inode size does not wrap through zero */
 796		if (new_size > inode->i_sb->s_maxbytes) {
 797			error = -EFBIG;
 798			goto out_unlock;
 799		}
 800
 801		/* Offset should be less than i_size */
 802		if (offset >= i_size_read(inode)) {
 803			error = -EINVAL;
 804			goto out_unlock;
 805		}
 806		do_file_insert = true;
 807	} else {
 808		flags |= XFS_PREALLOC_SET;
 809
 810		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 811		    offset + len > i_size_read(inode)) {
 812			new_size = offset + len;
 813			error = inode_newsize_ok(inode, new_size);
 814			if (error)
 815				goto out_unlock;
 816		}
 817
 818		if (mode & FALLOC_FL_ZERO_RANGE)
 819			error = xfs_zero_file_space(ip, offset, len);
 820		else {
 821			if (mode & FALLOC_FL_UNSHARE_RANGE) {
 822				error = xfs_reflink_unshare(ip, offset, len);
 823				if (error)
 824					goto out_unlock;
 825			}
 826			error = xfs_alloc_file_space(ip, offset, len,
 827						     XFS_BMAPI_PREALLOC);
 828		}
 829		if (error)
 830			goto out_unlock;
 831	}
 832
 833	if (file->f_flags & O_DSYNC)
 834		flags |= XFS_PREALLOC_SYNC;
 835
 836	error = xfs_update_prealloc_flags(ip, flags);
 837	if (error)
 838		goto out_unlock;
 839
 840	/* Change file size if needed */
 841	if (new_size) {
 842		struct iattr iattr;
 843
 844		iattr.ia_valid = ATTR_SIZE;
 845		iattr.ia_size = new_size;
 846		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
 847		if (error)
 848			goto out_unlock;
 849	}
 850
 851	/*
 852	 * Perform hole insertion now that the file size has been
 853	 * updated so that if we crash during the operation we don't
 854	 * leave shifted extents past EOF and hence lose access to
 855	 * the data that is contained within them.
 856	 */
 857	if (do_file_insert)
 858		error = xfs_insert_file_space(ip, offset, len);
 859
 860out_unlock:
 861	xfs_iunlock(ip, iolock);
 862	return error;
 863}
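
/*
 * A sketch of driving the FALLOC_FL_PUNCH_HOLE branch above from
 * userspace.  The VFS requires PUNCH_HOLE to be combined with KEEP_SIZE,
 * so the file size is unchanged and the punched range reads back as
 * zeroes while its blocks are freed:
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int punch_hole(int fd, off_t offset, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 offset, len);
 *	}
 */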
 864
 865STATIC int
 866xfs_file_clone_range(
 867	struct file	*file_in,
 868	loff_t		pos_in,
 869	struct file	*file_out,
 870	loff_t		pos_out,
 871	u64		len)
 872{
 873	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 874				     len, false);
 875}
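
/*
 * The VFS exposes this hook through the FICLONE/FICLONERANGE ioctls: a
 * clone is a reflink copy that shares blocks with the source until either
 * side is overwritten.  A sketch using the range variant (src_fd/dst_fd
 * assumed open on the same filesystem):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_clone_range fcr = {
 *		.src_fd		= src_fd,
 *		.src_offset	= 0,
 *		.src_length	= 0,	// 0 means "up to source EOF"
 *		.dest_offset	= 0,
 *	};
 *
 *	ioctl(dst_fd, FICLONERANGE, &fcr);
 */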
 876
 877STATIC ssize_t
 878xfs_file_dedupe_range(
 879	struct file	*src_file,
 880	u64		loff,
 881	u64		len,
 882	struct file	*dst_file,
 883	u64		dst_loff)
 884{
 885	int		error;
 886
 887	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
 888				     len, true);
 889	if (error)
 890		return error;
 891	return len;
 892}
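
/*
 * The dedupe hook is reached via the FIDEDUPERANGE ioctl, which shares
 * blocks only after verifying that both ranges hold identical data.  A
 * sketch for a single destination (the request ends in a flexible array
 * of destinations, hence the heap allocation; src_fd, dst_fd and len are
 * assumed from context):
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_dedupe_range *fdr =
 *		calloc(1, sizeof(*fdr) +
 *			  sizeof(struct file_dedupe_range_info));
 *
 *	fdr->src_offset = 0;
 *	fdr->src_length = len;
 *	fdr->dest_count = 1;
 *	fdr->info[0].dest_fd = dst_fd;
 *	fdr->info[0].dest_offset = 0;
 *	ioctl(src_fd, FIDEDUPERANGE, fdr);
 *	// info[0].status is FILE_DEDUPE_RANGE_SAME on success and
 *	// FILE_DEDUPE_RANGE_DIFFERS if the contents did not match
 */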
 893
 894STATIC int
 895xfs_file_open(
 896	struct inode	*inode,
 897	struct file	*file)
 898{
 899	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
 900		return -EFBIG;
 901	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
 902		return -EIO;
 903	return 0;
 904}
 905
 906STATIC int
 907xfs_dir_open(
 908	struct inode	*inode,
 909	struct file	*file)
 910{
 911	struct xfs_inode *ip = XFS_I(inode);
 912	int		mode;
 913	int		error;
 914
 915	error = xfs_file_open(inode, file);
 916	if (error)
 917		return error;
 918
 919	/*
 920	 * If there are any blocks, read-ahead block 0 as we're almost
 921	 * certain to have the next operation be a read there.
 922	 */
 923	mode = xfs_ilock_data_map_shared(ip);
 924	if (ip->i_d.di_nextents > 0)
 925		error = xfs_dir3_data_readahead(ip, 0, -1);
 926	xfs_iunlock(ip, mode);
 927	return error;
 928}
 929
 930STATIC int
 931xfs_file_release(
 932	struct inode	*inode,
 933	struct file	*filp)
 934{
 935	return xfs_release(XFS_I(inode));
 936}
 937
 938STATIC int
 939xfs_file_readdir(
 940	struct file	*file,
 941	struct dir_context *ctx)
 942{
 943	struct inode	*inode = file_inode(file);
 944	xfs_inode_t	*ip = XFS_I(inode);
 945	size_t		bufsize;
 946
 947	/*
 948	 * The Linux API doesn't pass the total size of the buffer we read
 949	 * into down to the filesystem.  With the filldir concept it's not
 950	 * needed for correct information, but the XFS dir2 leaf code wants
 951	 * an estimate of the buffer size to calculate its readahead window
 952	 * and to size the buffers used for mapping to
 953	 * physical blocks.
 954	 *
 955	 * Try to give it an estimate that's good enough, maybe at some
 956	 * point we can change the ->readdir prototype to include the
 957	 * buffer size.  For now we use the current glibc buffer size.
 958	 */
 959	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
 960
 961	return xfs_readdir(ip, ctx, bufsize);
 962}
 963
 964/*
 965 * This type indicates the kind of offset we would like to search for
 966 * in the page cache for xfs_seek_hole_data().
 967 */
 968enum {
 969	HOLE_OFF = 0,
 970	DATA_OFF,
 971};
 972
 973/*
 974 * Look up the desired type of offset from the given page.
 975 *
 976 * On success, return true and the offset argument will point to the
 977 * start of the region that was found.  Otherwise this function will
 978 * return false and keep the offset argument unchanged.
 979 */
 980STATIC bool
 981xfs_lookup_buffer_offset(
 982	struct page		*page,
 983	loff_t			*offset,
 984	unsigned int		type)
 985{
 986	loff_t			lastoff = page_offset(page);
 987	bool			found = false;
 988	struct buffer_head	*bh, *head;
 989
 990	bh = head = page_buffers(page);
 991	do {
 992		/*
 993		 * Unwritten extents that have data in the page
 994		 * cache covering them can be identified by the
 995		 * BH_Unwritten state flag.  Pages with multiple
 996		 * buffers might have a mix of holes, data and
 997		 * unwritten extents - any buffer with valid
 998		 * data in it should have BH_Uptodate flag set
 999		 * on it.
1000		 */
1001		if (buffer_unwritten(bh) ||
1002		    buffer_uptodate(bh)) {
1003			if (type == DATA_OFF)
1004				found = true;
1005		} else {
1006			if (type == HOLE_OFF)
1007				found = true;
1008		}
1009
1010		if (found) {
1011			*offset = lastoff;
1012			break;
1013		}
1014		lastoff += bh->b_size;
1015	} while ((bh = bh->b_this_page) != head);
1016
1017	return found;
1018}
1019
1020/*
1021 * This routine is called to find and return a data or hole offset from
1022 * the page cache for unwritten extents according to the desired type
1023 * for xfs_seek_hole_data().
1024 *
1025 * The offset argument tells where to start searching the page cache.
1026 * The map is used to figure out the end point of the range of pages to
1027 * look up.
1028 *
1029 * Return true if the desired type of offset was found, and the argument
1030 * offset is filled with that address.  Otherwise, return false and keep
1031 * offset unchanged.
1032 */
1033STATIC bool
1034xfs_find_get_desired_pgoff(
1035	struct inode		*inode,
1036	struct xfs_bmbt_irec	*map,
1037	unsigned int		type,
1038	loff_t			*offset)
1039{
1040	struct xfs_inode	*ip = XFS_I(inode);
1041	struct xfs_mount	*mp = ip->i_mount;
1042	struct pagevec		pvec;
1043	pgoff_t			index;
1044	pgoff_t			end;
1045	loff_t			endoff;
1046	loff_t			startoff = *offset;
1047	loff_t			lastoff = startoff;
1048	bool			found = false;
1049
1050	pagevec_init(&pvec, 0);
1051
1052	index = startoff >> PAGE_SHIFT;
1053	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1054	end = endoff >> PAGE_SHIFT;
1055	do {
1056		int		want;
1057		unsigned	nr_pages;
1058		unsigned int	i;
1059
1060		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
1061		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1062					  want);
1063		/*
1064		 * No page is mapped into the given range.  If we are
1065		 * searching for holes and this is the first pass through
1066		 * the loop, the given offset landed in a hole; return it.
1067		 *
1068		 * If we have already stepped through some block buffers
1069		 * looking for holes but they all contained data, then the
1070		 * last offset points to the end of the last mapped page;
1071		 * if it has not reached the end of the search range, there
1072		 * must be a hole between them.
1073		 */
1074		if (nr_pages == 0) {
1075			/* Data search found nothing */
1076			if (type == DATA_OFF)
1077				break;
1078
1079			ASSERT(type == HOLE_OFF);
1080			if (lastoff == startoff || lastoff < endoff) {
1081				found = true;
1082				*offset = lastoff;
1083			}
1084			break;
1085		}
1086
1087		/*
1088		 * We found at least one page.  If this is the first pass
1089		 * through the loop and the offset of the first page is
1090		 * greater than the given search offset, a hole was found.
1091		 */
1092		if (type == HOLE_OFF && lastoff == startoff &&
1093		    lastoff < page_offset(pvec.pages[0])) {
1094			found = true;
1095			break;
1096		}
1097
1098		for (i = 0; i < nr_pages; i++) {
1099			struct page	*page = pvec.pages[i];
1100			loff_t		b_offset;
1101
1102			/*
1103			 * At this point, the page may be truncated or
1104			 * invalidated (changing page->mapping to NULL),
1105			 * or even swizzled back from swapper_space to tmpfs
1106			 * file mapping. However, page->index will not change
1107			 * because we have a reference on the page.
1108			 *
1109			 * Searching is done if the page index is out of range.
1110			 * If the current offset has not reached the end of
1111			 * the specified search range, there must be a hole
1112			 * between them.
1113			 */
1114			if (page->index > end) {
1115				if (type == HOLE_OFF && lastoff < endoff) {
1116					*offset = lastoff;
1117					found = true;
1118				}
1119				goto out;
1120			}
1121
1122			lock_page(page);
1123			/*
1124			 * Page truncated or invalidated (page->mapping == NULL).
1125			 * We can freely skip it and proceed to check the next
1126			 * page.
1127			 */
1128			if (unlikely(page->mapping != inode->i_mapping)) {
1129				unlock_page(page);
1130				continue;
1131			}
1132
1133			if (!page_has_buffers(page)) {
1134				unlock_page(page);
1135				continue;
1136			}
1137
1138			found = xfs_lookup_buffer_offset(page, &b_offset, type);
1139			if (found) {
1140				/*
1141				 * The found offset may be less than the start
1142				 * of the search if this is the first pass
1143				 * through the loop.
1144				 */
1145				*offset = max_t(loff_t, startoff, b_offset);
1146				unlock_page(page);
1147				goto out;
1148			}
1149
1150			/*
1151			 * We were either searching for data and found nothing,
1152			 * or searching for a hole and found a data buffer.  In
1153			 * either case the next page probably contains what we
1154			 * want, so update the last offset accordingly.
1155			 */
1156			lastoff = page_offset(page) + PAGE_SIZE;
1157			unlock_page(page);
1158		}
1159
1160		/*
1161		 * Fewer pages were returned than we asked for, so the search
1162		 * is done.  Nothing was found if we were searching for data,
1163		 * but a hole was found behind the last offset.
1164		 */
1165		if (nr_pages < want) {
1166			if (type == HOLE_OFF) {
1167				*offset = lastoff;
1168				found = true;
1169			}
1170			break;
1171		}
1172
1173		index = pvec.pages[i - 1]->index + 1;
1174		pagevec_release(&pvec);
1175	} while (index <= end);
1176
1177out:
1178	pagevec_release(&pvec);
1179	return found;
1180}
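
/*
 * The page cache walk above exists because an unwritten extent can carry
 * dirty data that has not yet been converted on disk, and SEEK_DATA must
 * still report that data.  A sketch of the case being handled
 * (preallocate an unwritten range, dirty one page, then seek; fd assumed):
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024 * 1024); // unwritten
 *	pwrite(fd, "x", 1, 64 * 1024);	// data only in the page cache
 *	off_t off = lseek(fd, 0, SEEK_DATA);
 *	// off is expected to land on the dirtied page (rounded to a
 *	// page boundary), not at EOF
 */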
1181
1182/*
1183 * The caller must lock the inode with xfs_ilock_data_map_shared();
1184 * can we craft an appropriate ASSERT?
1185 *
1186 * The end argument exists because VFS lseek requires offsets past i_size
1187 * to return -ENXIO, but we use this for the quota code, which does not
1188 * maintain i_size, and we want to SEEK_DATA past i_size.
1189 */
1190loff_t
1191__xfs_seek_hole_data(
1192	struct inode		*inode,
1193	loff_t			start,
1194	loff_t			end,
1195	int			whence)
1196{
1197	struct xfs_inode	*ip = XFS_I(inode);
1198	struct xfs_mount	*mp = ip->i_mount;
1199	loff_t			uninitialized_var(offset);
1200	xfs_fileoff_t		fsbno;
1201	xfs_filblks_t		lastbno;
1202	int			error;
1203
1204	if (start >= end) {
1205		error = -ENXIO;
1206		goto out_error;
1207	}
1208
1209	/*
1210	 * Try to read extents from the first block indicated
1211	 * by fsbno to the end block of the file.
1212	 */
1213	fsbno = XFS_B_TO_FSBT(mp, start);
1214	lastbno = XFS_B_TO_FSB(mp, end);
1215
1216	for (;;) {
1217		struct xfs_bmbt_irec	map[2];
1218		int			nmap = 2;
1219		unsigned int		i;
1220
1221		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
1222				       XFS_BMAPI_ENTIRE);
1223		if (error)
1224			goto out_error;
1225
1226		/* No extents at given offset, must be beyond EOF */
1227		if (nmap == 0) {
1228			error = -ENXIO;
1229			goto out_error;
1230		}
1231
1232		for (i = 0; i < nmap; i++) {
1233			offset = max_t(loff_t, start,
1234				       XFS_FSB_TO_B(mp, map[i].br_startoff));
1235
1236			/* Landed in the hole we wanted? */
1237			if (whence == SEEK_HOLE &&
1238			    map[i].br_startblock == HOLESTARTBLOCK)
1239				goto out;
1240
1241			/* Landed in the data extent we wanted? */
1242			if (whence == SEEK_DATA &&
1243			    (map[i].br_startblock == DELAYSTARTBLOCK ||
1244			     (map[i].br_state == XFS_EXT_NORM &&
1245			      !isnullstartblock(map[i].br_startblock))))
1246				goto out;
1247
1248			/*
1249			 * Landed in an unwritten extent, try to search
1250			 * for hole or data from page cache.
1251			 */
1252			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
1253				if (xfs_find_get_desired_pgoff(inode, &map[i],
1254				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
1255							&offset))
1256					goto out;
1257			}
1258		}
1259
1260		/*
1261		 * We only received one extent out of the two requested. This
1262		 * means we've hit EOF and didn't find what we are looking for.
1263		 */
1264		if (nmap == 1) {
1265			/*
1266			 * If we were looking for a hole, set offset to
1267			 * the end of the file (i.e., there is an implicit
1268			 * hole at the end of any file).
1269			 */
1270			if (whence == SEEK_HOLE) {
1271				offset = end;
1272				break;
1273			}
1274			/*
1275			 * If we were looking for data, it's nowhere to be found
1276			 */
1277			ASSERT(whence == SEEK_DATA);
1278			error = -ENXIO;
1279			goto out_error;
1280		}
1281
1282		ASSERT(i > 1);
1283
1284		/*
1285		 * Nothing was found, proceed to the next round of search
1286		 * if the next reading offset is not at or beyond EOF.
1287		 */
1288		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
1289		start = XFS_FSB_TO_B(mp, fsbno);
1290		if (start >= end) {
1291			if (whence == SEEK_HOLE) {
1292				offset = end;
1293				break;
1294			}
1295			ASSERT(whence == SEEK_DATA);
1296			error = -ENXIO;
1297			goto out_error;
1298		}
1299	}
1300
1301out:
1302	/*
1303	 * If at this point we have found the hole we wanted, the returned
1304	 * offset may be bigger than the file size as it may be aligned to a
1305	 * page boundary for unwritten extents.  We need to deal with this
1306	 * situation in particular.
1307	 */
1308	if (whence == SEEK_HOLE)
1309		offset = min_t(loff_t, offset, end);
1310
1311	return offset;
1312
1313out_error:
1314	return error;
1315}
1316
1317STATIC loff_t
1318xfs_seek_hole_data(
1319	struct file		*file,
1320	loff_t			start,
1321	int			whence)
1322{
1323	struct inode		*inode = file->f_mapping->host;
1324	struct xfs_inode	*ip = XFS_I(inode);
1325	struct xfs_mount	*mp = ip->i_mount;
1326	uint			lock;
1327	loff_t			offset, end;
1328	int			error = 0;
1329
1330	if (XFS_FORCED_SHUTDOWN(mp))
1331		return -EIO;
1332
1333	lock = xfs_ilock_data_map_shared(ip);
1334
1335	end = i_size_read(inode);
1336	offset = __xfs_seek_hole_data(inode, start, end, whence);
1337	if (offset < 0) {
1338		error = offset;
1339		goto out_unlock;
1340	}
1341
1342	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1343
1344out_unlock:
1345	xfs_iunlock(ip, lock);
1346
1347	if (error)
1348		return error;
1349	return offset;
1350}
1351
1352STATIC loff_t
1353xfs_file_llseek(
1354	struct file	*file,
1355	loff_t		offset,
1356	int		whence)
1357{
1358	switch (whence) {
1359	case SEEK_END:
1360	case SEEK_CUR:
1361	case SEEK_SET:
1362		return generic_file_llseek(file, offset, whence);
1363	case SEEK_HOLE:
1364	case SEEK_DATA:
1365		return xfs_seek_hole_data(file, offset, whence);
1366	default:
1367		return -EINVAL;
1368	}
1369}
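
/*
 * A sketch of walking a file's sparse layout with the SEEK_DATA/SEEK_HOLE
 * support wired up above.  Every file has an implicit hole at EOF, so the
 * loop below terminates when lseek() fails with ENXIO:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	void dump_extents(int fd)
 *	{
 *		off_t data = 0, hole;
 *
 *		while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *			hole = lseek(fd, data, SEEK_HOLE);
 *			printf("data: [%lld, %lld)\n",
 *			       (long long)data, (long long)hole);
 *			data = hole;
 *		}
 *	}
 */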
1370
1371/*
1372 * Locking for serialisation of IO during page faults. This results in a lock
1373 * ordering of:
1374 *
1375 * mmap_sem (MM)
1376 *   sb_start_pagefault(vfs, freeze)
1377 *     i_mmaplock (XFS - truncate serialisation)
1378 *       page_lock (MM)
1379 *         i_lock (XFS - extent map serialisation)
1380 */
1381
1382/*
1383 * An mmap()ed file has taken a write protection fault and is being made
1384 * writable.  We can set the page state up correctly for a writable page,
1385 * which means we can do correct delalloc accounting (ENOSPC checking!) and
1386 * unwritten extent mapping.
1387 */
1388STATIC int
1389xfs_filemap_page_mkwrite(
1390	struct vm_area_struct	*vma,
1391	struct vm_fault		*vmf)
1392{
1393	struct inode		*inode = file_inode(vma->vm_file);
1394	int			ret;
1395
1396	trace_xfs_filemap_page_mkwrite(XFS_I(inode));
1397
1398	sb_start_pagefault(inode->i_sb);
1399	file_update_time(vma->vm_file);
1400	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1401
1402	if (IS_DAX(inode)) {
1403		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
1404	} else {
1405		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
1406		ret = block_page_mkwrite_return(ret);
1407	}
1408
1409	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1410	sb_end_pagefault(inode->i_sb);
1411
1412	return ret;
1413}
1414
1415STATIC int
1416xfs_filemap_fault(
1417	struct vm_area_struct	*vma,
1418	struct vm_fault		*vmf)
1419{
1420	struct inode		*inode = file_inode(vma->vm_file);
1421	int			ret;
1422
1423	trace_xfs_filemap_fault(XFS_I(inode));
1424
1425	/* DAX can shortcut the normal fault path on write faults! */
1426	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1427		return xfs_filemap_page_mkwrite(vma, vmf);
1428
1429	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1430	if (IS_DAX(inode))
1431		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
1432	else
1433		ret = filemap_fault(vma, vmf);
1434	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1435
1436	return ret;
1437}
1438
1439/*
1440 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
1441 * both read and write faults.  There is no ->pmd_mkwrite callout for huge
1442 * pages, so we have a single function here to handle both cases.  @flags
1443 * carries the information on the type of fault occurring.
1445 */
1446STATIC int
1447xfs_filemap_pmd_fault(
1448	struct vm_area_struct	*vma,
1449	unsigned long		addr,
1450	pmd_t			*pmd,
1451	unsigned int		flags)
1452{
1453	struct inode		*inode = file_inode(vma->vm_file);
1454	struct xfs_inode	*ip = XFS_I(inode);
1455	int			ret;
1456
1457	if (!IS_DAX(inode))
1458		return VM_FAULT_FALLBACK;
1459
1460	trace_xfs_filemap_pmd_fault(ip);
1461
1462	if (flags & FAULT_FLAG_WRITE) {
1463		sb_start_pagefault(inode->i_sb);
1464		file_update_time(vma->vm_file);
1465	}
1466
1467	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1468	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
1469	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1470
1471	if (flags & FAULT_FLAG_WRITE)
1472		sb_end_pagefault(inode->i_sb);
1473
1474	return ret;
1475}
1476
1477/*
1478 * pfn_mkwrite was originally intended to ensure we capture timestamp
1479 * updates on write faults.  In reality, it is needed to serialise against
1480 * truncate, similar to page_mkwrite.  Hence we cycle the XFS_MMAPLOCK_SHARED
1481 * to ensure the fault barrier is serialised in place.
1482 */
1483static int
1484xfs_filemap_pfn_mkwrite(
1485	struct vm_area_struct	*vma,
1486	struct vm_fault		*vmf)
1487{
1489	struct inode		*inode = file_inode(vma->vm_file);
1490	struct xfs_inode	*ip = XFS_I(inode);
1491	int			ret = VM_FAULT_NOPAGE;
1492	loff_t			size;
1493
1494	trace_xfs_filemap_pfn_mkwrite(ip);
1495
1496	sb_start_pagefault(inode->i_sb);
1497	file_update_time(vma->vm_file);
1498
1499	/* check that the faulting page hasn't raced with truncate */
1500	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1501	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1502	if (vmf->pgoff >= size)
1503		ret = VM_FAULT_SIGBUS;
1504	else if (IS_DAX(inode))
1505		ret = dax_pfn_mkwrite(vma, vmf);
1506	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1507	sb_end_pagefault(inode->i_sb);
1508	return ret;
1510}
1511
1512static const struct vm_operations_struct xfs_file_vm_ops = {
1513	.fault		= xfs_filemap_fault,
1514	.pmd_fault	= xfs_filemap_pmd_fault,
1515	.map_pages	= filemap_map_pages,
1516	.page_mkwrite	= xfs_filemap_page_mkwrite,
1517	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1518};
1519
1520STATIC int
1521xfs_file_mmap(
1522	struct file	*filp,
1523	struct vm_area_struct *vma)
1524{
1525	file_accessed(filp);
1526	vma->vm_ops = &xfs_file_vm_ops;
1527	if (IS_DAX(file_inode(filp)))
1528		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1529	return 0;
1530}
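
/*
 * A sketch of what drives the fault handlers installed above: the first
 * read through a shared writable mapping takes ->fault(), and the first
 * store to a clean page takes ->page_mkwrite() (fd assumed):
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *
 *	char c = p[0];	// read fault -> xfs_filemap_fault()
 *	p[0] = c + 1;	// write-protect fault -> xfs_filemap_page_mkwrite()
 *	msync(p, 4096, MS_SYNC);
 *	munmap(p, 4096);
 */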
1531
1532const struct file_operations xfs_file_operations = {
1533	.llseek		= xfs_file_llseek,
1534	.read_iter	= xfs_file_read_iter,
1535	.write_iter	= xfs_file_write_iter,
1536	.splice_read	= generic_file_splice_read,
1537	.splice_write	= iter_file_splice_write,
1538	.unlocked_ioctl	= xfs_file_ioctl,
1539#ifdef CONFIG_COMPAT
1540	.compat_ioctl	= xfs_file_compat_ioctl,
1541#endif
1542	.mmap		= xfs_file_mmap,
1543	.open		= xfs_file_open,
1544	.release	= xfs_file_release,
1545	.fsync		= xfs_file_fsync,
1546	.get_unmapped_area = thp_get_unmapped_area,
1547	.fallocate	= xfs_file_fallocate,
1548	.clone_file_range = xfs_file_clone_range,
1549	.dedupe_file_range = xfs_file_dedupe_range,
1550};
1551
1552const struct file_operations xfs_dir_file_operations = {
1553	.open		= xfs_dir_open,
1554	.read		= generic_read_dir,
1555	.iterate_shared	= xfs_file_readdir,
1556	.llseek		= generic_file_llseek,
1557	.unlocked_ioctl	= xfs_file_ioctl,
1558#ifdef CONFIG_COMPAT
1559	.compat_ioctl	= xfs_file_compat_ioctl,
1560#endif
1561	.fsync		= xfs_dir_fsync,
1562};