1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_mount.h"
25#include "xfs_da_format.h"
26#include "xfs_da_btree.h"
27#include "xfs_inode.h"
28#include "xfs_trans.h"
29#include "xfs_inode_item.h"
30#include "xfs_bmap.h"
31#include "xfs_bmap_util.h"
32#include "xfs_error.h"
33#include "xfs_dir2.h"
34#include "xfs_dir2_priv.h"
35#include "xfs_ioctl.h"
36#include "xfs_trace.h"
37#include "xfs_log.h"
38#include "xfs_icache.h"
39#include "xfs_pnfs.h"
40#include "xfs_iomap.h"
41#include "xfs_reflink.h"
42
43#include <linux/dcache.h>
44#include <linux/falloc.h>
45#include <linux/pagevec.h>
46#include <linux/backing-dev.h>
47#include <linux/mman.h>
48
49static const struct vm_operations_struct xfs_file_vm_ops;
50
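/*
 * Set or clear the inode preallocation flag in a small transaction. Unless
 * XFS_PREALLOC_INVISIBLE is set this also clears the suid/sgid bits and
 * updates the timestamps like an ordinary write, and XFS_PREALLOC_SYNC
 * makes the transaction commit synchronously.
 */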
51int
52xfs_update_prealloc_flags(
53 struct xfs_inode *ip,
54 enum xfs_prealloc_flags flags)
55{
56 struct xfs_trans *tp;
57 int error;
58
59 error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
60 0, 0, 0, &tp);
61 if (error)
62 return error;
63
64 xfs_ilock(ip, XFS_ILOCK_EXCL);
65 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
66
67 if (!(flags & XFS_PREALLOC_INVISIBLE)) {
68 VFS_I(ip)->i_mode &= ~S_ISUID;
69 if (VFS_I(ip)->i_mode & S_IXGRP)
70 VFS_I(ip)->i_mode &= ~S_ISGID;
71 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
72 }
73
74 if (flags & XFS_PREALLOC_SET)
75 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
76 if (flags & XFS_PREALLOC_CLEAR)
77 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
78
79 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
80 if (flags & XFS_PREALLOC_SYNC)
81 xfs_trans_set_sync(tp);
82 return xfs_trans_commit(tp);
83}
84
85/*
86 * Fsync operations on directories are much simpler than on regular files,
87 * as there is no file data to flush, and thus also no need for explicit
88 * cache flush operations, and there are no non-transaction metadata updates
89 * on directories either.
90 */
91STATIC int
92xfs_dir_fsync(
93 struct file *file,
94 loff_t start,
95 loff_t end,
96 int datasync)
97{
98 struct xfs_inode *ip = XFS_I(file->f_mapping->host);
99 struct xfs_mount *mp = ip->i_mount;
100 xfs_lsn_t lsn = 0;
101
102 trace_xfs_dir_fsync(ip);
103
104 xfs_ilock(ip, XFS_ILOCK_SHARED);
105 if (xfs_ipincount(ip))
106 lsn = ip->i_itemp->ili_last_lsn;
107 xfs_iunlock(ip, XFS_ILOCK_SHARED);
108
109 if (!lsn)
110 return 0;
111 return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
112}
113
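/*
 * fsync for regular files: write back and wait on dirty data in the range,
 * issue a cache flush for the data (or RT) device if needed, then force the
 * log up to the last LSN that modified the inode so the metadata is stable.
 */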
114STATIC int
115xfs_file_fsync(
116 struct file *file,
117 loff_t start,
118 loff_t end,
119 int datasync)
120{
121 struct inode *inode = file->f_mapping->host;
122 struct xfs_inode *ip = XFS_I(inode);
123 struct xfs_mount *mp = ip->i_mount;
124 int error = 0;
125 int log_flushed = 0;
126 xfs_lsn_t lsn = 0;
127
128 trace_xfs_file_fsync(ip);
129
130 error = file_write_and_wait_range(file, start, end);
131 if (error)
132 return error;
133
134 if (XFS_FORCED_SHUTDOWN(mp))
135 return -EIO;
136
137 xfs_iflags_clear(ip, XFS_ITRUNCATED);
138
139 /*
140 * If we have an RT and/or log subvolume we need to make sure to flush
141	 * the write cache of the device used for file data first. This is to
142	 * ensure newly written file data makes it to disk before logging the new
143 * inode size in case of an extending write.
144 */
145 if (XFS_IS_REALTIME_INODE(ip))
146 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
147 else if (mp->m_logdev_targp != mp->m_ddev_targp)
148 xfs_blkdev_issue_flush(mp->m_ddev_targp);
149
150 /*
151 * All metadata updates are logged, which means that we just have to
152 * flush the log up to the latest LSN that touched the inode. If we have
153 * concurrent fsync/fdatasync() calls, we need them to all block on the
154 * log force before we clear the ili_fsync_fields field. This ensures
155 * that we don't get a racing sync operation that does not wait for the
156 * metadata to hit the journal before returning. If we race with
157 * clearing the ili_fsync_fields, then all that will happen is the log
158 * force will do nothing as the lsn will already be on disk. We can't
159 * race with setting ili_fsync_fields because that is done under
160 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
161 * until after the ili_fsync_fields is cleared.
162 */
163 xfs_ilock(ip, XFS_ILOCK_SHARED);
164 if (xfs_ipincount(ip)) {
165 if (!datasync ||
166 (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
167 lsn = ip->i_itemp->ili_last_lsn;
168 }
169
170 if (lsn) {
171 error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
172 ip->i_itemp->ili_fsync_fields = 0;
173 }
174 xfs_iunlock(ip, XFS_ILOCK_SHARED);
175
176 /*
177	 * If we only have a single device, and the log force above was
178	 * a no-op, we might have to flush the data device cache here.
179 * This can only happen for fdatasync/O_DSYNC if we were overwriting
180 * an already allocated file and thus do not have any metadata to
181 * commit.
182 */
183 if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
184 mp->m_logdev_targp == mp->m_ddev_targp)
185 xfs_blkdev_issue_flush(mp->m_ddev_targp);
186
187 return error;
188}
189
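/*
 * Direct IO read path: take IOLOCK_SHARED and hand the iovec off to the
 * generic iomap direct IO code.
 */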
190STATIC ssize_t
191xfs_file_dio_aio_read(
192 struct kiocb *iocb,
193 struct iov_iter *to)
194{
195 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
196 size_t count = iov_iter_count(to);
197 ssize_t ret;
198
199 trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
200
201 if (!count)
202 return 0; /* skip atime */
203
204 file_accessed(iocb->ki_filp);
205
206 xfs_ilock(ip, XFS_IOLOCK_SHARED);
207 ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
208 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
209
210 return ret;
211}
212
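/*
 * DAX read path: copy directly from the backing storage via dax_iomap_rw()
 * while holding IOLOCK_SHARED (taken without blocking for IOCB_NOWAIT).
 */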
213static noinline ssize_t
214xfs_file_dax_read(
215 struct kiocb *iocb,
216 struct iov_iter *to)
217{
218 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host);
219 size_t count = iov_iter_count(to);
220 ssize_t ret = 0;
221
222 trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
223
224 if (!count)
225 return 0; /* skip atime */
226
227 if (iocb->ki_flags & IOCB_NOWAIT) {
228 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
229 return -EAGAIN;
230 } else {
231 xfs_ilock(ip, XFS_IOLOCK_SHARED);
232 }
233
234 ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
235 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
236
237 file_accessed(iocb->ki_filp);
238 return ret;
239}
240
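/*
 * Buffered read path: take IOLOCK_SHARED (non-blocking for IOCB_NOWAIT) and
 * let the generic page cache read code do the work.
 */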
241STATIC ssize_t
242xfs_file_buffered_aio_read(
243 struct kiocb *iocb,
244 struct iov_iter *to)
245{
246 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
247 ssize_t ret;
248
249 trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
250
251 if (iocb->ki_flags & IOCB_NOWAIT) {
252 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
253 return -EAGAIN;
254 } else {
255 xfs_ilock(ip, XFS_IOLOCK_SHARED);
256 }
257 ret = generic_file_read_iter(iocb, to);
258 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
259
260 return ret;
261}
262
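/*
 * Top level read entry point: dispatch to the DAX, direct or buffered read
 * path and account the bytes read.
 */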
263STATIC ssize_t
264xfs_file_read_iter(
265 struct kiocb *iocb,
266 struct iov_iter *to)
267{
268 struct inode *inode = file_inode(iocb->ki_filp);
269 struct xfs_mount *mp = XFS_I(inode)->i_mount;
270 ssize_t ret = 0;
271
272 XFS_STATS_INC(mp, xs_read_calls);
273
274 if (XFS_FORCED_SHUTDOWN(mp))
275 return -EIO;
276
277 if (IS_DAX(inode))
278 ret = xfs_file_dax_read(iocb, to);
279 else if (iocb->ki_flags & IOCB_DIRECT)
280 ret = xfs_file_dio_aio_read(iocb, to);
281 else
282 ret = xfs_file_buffered_aio_read(iocb, to);
283
284 if (ret > 0)
285 XFS_STATS_ADD(mp, xs_read_bytes, ret);
286 return ret;
287}
288
289/*
290 * Common pre-write limit and setup checks.
291 *
292	 * Called with the iolock held either shared or exclusive according to
293 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
294 * if called for a direct write beyond i_size.
295 */
296STATIC ssize_t
297xfs_file_aio_write_checks(
298 struct kiocb *iocb,
299 struct iov_iter *from,
300 int *iolock)
301{
302 struct file *file = iocb->ki_filp;
303 struct inode *inode = file->f_mapping->host;
304 struct xfs_inode *ip = XFS_I(inode);
305 ssize_t error = 0;
306 size_t count = iov_iter_count(from);
307 bool drained_dio = false;
308 loff_t isize;
309
310restart:
311 error = generic_write_checks(iocb, from);
312 if (error <= 0)
313 return error;
314
315 error = xfs_break_layouts(inode, iolock);
316 if (error)
317 return error;
318
319 /*
320 * For changing security info in file_remove_privs() we need i_rwsem
321 * exclusively.
322 */
323 if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
324 xfs_iunlock(ip, *iolock);
325 *iolock = XFS_IOLOCK_EXCL;
326 xfs_ilock(ip, *iolock);
327 goto restart;
328 }
329 /*
330 * If the offset is beyond the size of the file, we need to zero any
331 * blocks that fall between the existing EOF and the start of this
332 * write. If zeroing is needed and we are currently holding the
333 * iolock shared, we need to update it to exclusive which implies
334 * having to redo all checks before.
335 *
336 * We need to serialise against EOF updates that occur in IO
337 * completions here. We want to make sure that nobody is changing the
338 * size while we do this check until we have placed an IO barrier (i.e.
339 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
340 * The spinlock effectively forms a memory barrier once we have the
341 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
342 * and hence be able to correctly determine if we need to run zeroing.
343 */
344 spin_lock(&ip->i_flags_lock);
345 isize = i_size_read(inode);
346 if (iocb->ki_pos > isize) {
347 spin_unlock(&ip->i_flags_lock);
348 if (!drained_dio) {
349 if (*iolock == XFS_IOLOCK_SHARED) {
350 xfs_iunlock(ip, *iolock);
351 *iolock = XFS_IOLOCK_EXCL;
352 xfs_ilock(ip, *iolock);
353 iov_iter_reexpand(from, count);
354 }
355 /*
356 * We now have an IO submission barrier in place, but
357 * AIO can do EOF updates during IO completion and hence
358 * we now need to wait for all of them to drain. Non-AIO
359 * DIO will have drained before we are given the
360 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
361 * no-op.
362 */
363 inode_dio_wait(inode);
364 drained_dio = true;
365 goto restart;
366 }
367
368 trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
369 error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
370 NULL, &xfs_iomap_ops);
371 if (error)
372 return error;
373 } else
374 spin_unlock(&ip->i_flags_lock);
375
376 /*
377 * Updating the timestamps will grab the ilock again from
378 * xfs_fs_dirty_inode, so we have to call it after dropping the
379 * lock above. Eventually we should look into a way to avoid
380 * the pointless lock roundtrip.
381 */
382 if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
383 error = file_update_time(file);
384 if (error)
385 return error;
386 }
387
388 /*
389 * If we're writing the file then make sure to clear the setuid and
390 * setgid bits if the process is not being run by root. This keeps
391 * people from modifying setuid and setgid binaries.
392 */
393 if (!IS_NOSEC(inode))
394 return file_remove_privs(file);
395 return 0;
396}
397
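/*
 * Direct IO write completion handler: finish any copy-on-write remapping,
 * convert unwritten extents covered by the write, and update the in-core
 * and on-disk file size for extending writes.
 */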
398static int
399xfs_dio_write_end_io(
400 struct kiocb *iocb,
401 ssize_t size,
402 unsigned flags)
403{
404 struct inode *inode = file_inode(iocb->ki_filp);
405 struct xfs_inode *ip = XFS_I(inode);
406 loff_t offset = iocb->ki_pos;
407 int error = 0;
408
409 trace_xfs_end_io_direct_write(ip, offset, size);
410
411 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
412 return -EIO;
413
414 if (size <= 0)
415 return size;
416
417 if (flags & IOMAP_DIO_COW) {
418 error = xfs_reflink_end_cow(ip, offset, size);
419 if (error)
420 return error;
421 }
422
423 /*
424 * Unwritten conversion updates the in-core isize after extent
425 * conversion but before updating the on-disk size. Updating isize any
426 * earlier allows a racing dio read to find unwritten extents before
427 * they are converted.
428 */
429 if (flags & IOMAP_DIO_UNWRITTEN)
430 return xfs_iomap_write_unwritten(ip, offset, size, true);
431
432 /*
433 * We need to update the in-core inode size here so that we don't end up
434 * with the on-disk inode size being outside the in-core inode size. We
435 * have no other method of updating EOF for AIO, so always do it here
436 * if necessary.
437 *
438 * We need to lock the test/set EOF update as we can be racing with
439 * other IO completions here to update the EOF. Failing to serialise
440 * here can result in EOF moving backwards and Bad Things Happen when
441 * that occurs.
442 */
443 spin_lock(&ip->i_flags_lock);
444 if (offset + size > i_size_read(inode)) {
445 i_size_write(inode, offset + size);
446 spin_unlock(&ip->i_flags_lock);
447 error = xfs_setfilesize(ip, offset, size);
448 } else {
449 spin_unlock(&ip->i_flags_lock);
450 }
451
452 return error;
453}
454
455/*
456 * xfs_file_dio_aio_write - handle direct IO writes
457 *
458 * Lock the inode appropriately to prepare for and issue a direct IO write.
459	 * By separating it from the buffered write path we remove all of the
460	 * tricky-to-follow locking changes and looping.
461 *
462 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
463 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
464 * pages are flushed out.
465 *
466 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
467 * allowing them to be done in parallel with reads and other direct IO writes.
468 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
469 * needs to do sub-block zeroing and that requires serialisation against other
470 * direct IOs to the same block. In this case we need to serialise the
471 * submission of the unaligned IOs so that we don't get racing block zeroing in
472 * the dio layer. To avoid the problem with aio, we also need to wait for
473 * outstanding IOs to complete so that unwritten extent conversion is completed
474 * before we try to map the overlapping block. This is currently implemented by
475 * hitting it with a big hammer (i.e. inode_dio_wait()).
476 *
477 * Returns with locks held indicated by @iolock and errors indicated by
478 * negative return values.
479 */
480STATIC ssize_t
481xfs_file_dio_aio_write(
482 struct kiocb *iocb,
483 struct iov_iter *from)
484{
485 struct file *file = iocb->ki_filp;
486 struct address_space *mapping = file->f_mapping;
487 struct inode *inode = mapping->host;
488 struct xfs_inode *ip = XFS_I(inode);
489 struct xfs_mount *mp = ip->i_mount;
490 ssize_t ret = 0;
491 int unaligned_io = 0;
492 int iolock;
493 size_t count = iov_iter_count(from);
494 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
495 mp->m_rtdev_targp : mp->m_ddev_targp;
496
497 /* DIO must be aligned to device logical sector size */
498 if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
499 return -EINVAL;
500
501 /*
502 * Don't take the exclusive iolock here unless the I/O is unaligned to
503 * the file system block size. We don't need to consider the EOF
504 * extension case here because xfs_file_aio_write_checks() will relock
505 * the inode as necessary for EOF zeroing cases and fill out the new
506 * inode size as appropriate.
507 */
508 if ((iocb->ki_pos & mp->m_blockmask) ||
509 ((iocb->ki_pos + count) & mp->m_blockmask)) {
510 unaligned_io = 1;
511
512 /*
513 * We can't properly handle unaligned direct I/O to reflink
514 * files yet, as we can't unshare a partial block.
515 */
516 if (xfs_is_reflink_inode(ip)) {
517 trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
518 return -EREMCHG;
519 }
520 iolock = XFS_IOLOCK_EXCL;
521 } else {
522 iolock = XFS_IOLOCK_SHARED;
523 }
524
525 if (iocb->ki_flags & IOCB_NOWAIT) {
526 if (!xfs_ilock_nowait(ip, iolock))
527 return -EAGAIN;
528 } else {
529 xfs_ilock(ip, iolock);
530 }
531
532 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
533 if (ret)
534 goto out;
535 count = iov_iter_count(from);
536
537 /*
538 * If we are doing unaligned IO, wait for all other IO to drain,
539 * otherwise demote the lock if we had to take the exclusive lock
540 * for other reasons in xfs_file_aio_write_checks.
541 */
542 if (unaligned_io) {
543 /* If we are going to wait for other DIO to finish, bail */
544 if (iocb->ki_flags & IOCB_NOWAIT) {
545 if (atomic_read(&inode->i_dio_count))
546 return -EAGAIN;
547 } else {
548 inode_dio_wait(inode);
549 }
550 } else if (iolock == XFS_IOLOCK_EXCL) {
551 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
552 iolock = XFS_IOLOCK_SHARED;
553 }
554
555 trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
556 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
557out:
558 xfs_iunlock(ip, iolock);
559
560 /*
561	 * No fallback to buffered IO on errors for XFS; direct IO will either
562 * complete fully or fail.
563 */
564 ASSERT(ret < 0 || ret == count);
565 return ret;
566}
567
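/*
 * DAX write path: always run under IOLOCK_EXCL, write directly to the
 * backing storage via dax_iomap_rw() and update the file size if the write
 * extended the file.
 */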
568static noinline ssize_t
569xfs_file_dax_write(
570 struct kiocb *iocb,
571 struct iov_iter *from)
572{
573 struct inode *inode = iocb->ki_filp->f_mapping->host;
574 struct xfs_inode *ip = XFS_I(inode);
575 int iolock = XFS_IOLOCK_EXCL;
576 ssize_t ret, error = 0;
577 size_t count;
578 loff_t pos;
579
580 if (iocb->ki_flags & IOCB_NOWAIT) {
581 if (!xfs_ilock_nowait(ip, iolock))
582 return -EAGAIN;
583 } else {
584 xfs_ilock(ip, iolock);
585 }
586
587 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
588 if (ret)
589 goto out;
590
591 pos = iocb->ki_pos;
592 count = iov_iter_count(from);
593
594 trace_xfs_file_dax_write(ip, count, pos);
595 ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
596 if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
597 i_size_write(inode, iocb->ki_pos);
598 error = xfs_setfilesize(ip, pos, ret);
599 }
600out:
601 xfs_iunlock(ip, iolock);
602 return error ? error : ret;
603}
604
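/*
 * Buffered write path: runs under IOLOCK_EXCL. On EDQUOT/ENOSPC we try to
 * free up speculative preallocations and CoW blocks and retry the write
 * before giving up.
 */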
605STATIC ssize_t
606xfs_file_buffered_aio_write(
607 struct kiocb *iocb,
608 struct iov_iter *from)
609{
610 struct file *file = iocb->ki_filp;
611 struct address_space *mapping = file->f_mapping;
612 struct inode *inode = mapping->host;
613 struct xfs_inode *ip = XFS_I(inode);
614 ssize_t ret;
615 int enospc = 0;
616 int iolock;
617
618 if (iocb->ki_flags & IOCB_NOWAIT)
619 return -EOPNOTSUPP;
620
621write_retry:
622 iolock = XFS_IOLOCK_EXCL;
623 xfs_ilock(ip, iolock);
624
625 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
626 if (ret)
627 goto out;
628
629 /* We can write back this queue in page reclaim */
630 current->backing_dev_info = inode_to_bdi(inode);
631
632 trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
633 ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
634 if (likely(ret >= 0))
635 iocb->ki_pos += ret;
636
637 /*
638 * If we hit a space limit, try to free up some lingering preallocated
639 * space before returning an error. In the case of ENOSPC, first try to
640 * write back all dirty inodes to free up some of the excess reserved
641 * metadata space. This reduces the chances that the eofblocks scan
642 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
643 * also behaves as a filter to prevent too many eofblocks scans from
644 * running at the same time.
645 */
646 if (ret == -EDQUOT && !enospc) {
647 xfs_iunlock(ip, iolock);
648 enospc = xfs_inode_free_quota_eofblocks(ip);
649 if (enospc)
650 goto write_retry;
651 enospc = xfs_inode_free_quota_cowblocks(ip);
652 if (enospc)
653 goto write_retry;
654 iolock = 0;
655 } else if (ret == -ENOSPC && !enospc) {
656 struct xfs_eofblocks eofb = {0};
657
658 enospc = 1;
659 xfs_flush_inodes(ip->i_mount);
660
661 xfs_iunlock(ip, iolock);
662 eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
663 xfs_icache_free_eofblocks(ip->i_mount, &eofb);
664 xfs_icache_free_cowblocks(ip->i_mount, &eofb);
665 goto write_retry;
666 }
667
668 current->backing_dev_info = NULL;
669out:
670 if (iolock)
671 xfs_iunlock(ip, iolock);
672 return ret;
673}
674
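/*
 * Top level write entry point: dispatch to the DAX, direct or buffered
 * write path, falling back from direct to buffered IO only for unaligned
 * writes to reflinked files, and finish off with generic_write_sync() for
 * O_(D)SYNC handling.
 */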
675STATIC ssize_t
676xfs_file_write_iter(
677 struct kiocb *iocb,
678 struct iov_iter *from)
679{
680 struct file *file = iocb->ki_filp;
681 struct address_space *mapping = file->f_mapping;
682 struct inode *inode = mapping->host;
683 struct xfs_inode *ip = XFS_I(inode);
684 ssize_t ret;
685 size_t ocount = iov_iter_count(from);
686
687 XFS_STATS_INC(ip->i_mount, xs_write_calls);
688
689 if (ocount == 0)
690 return 0;
691
692 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
693 return -EIO;
694
695 if (IS_DAX(inode))
696 ret = xfs_file_dax_write(iocb, from);
697 else if (iocb->ki_flags & IOCB_DIRECT) {
698 /*
699 * Allow a directio write to fall back to a buffered
700 * write *only* in the case that we're doing a reflink
701 * CoW. In all other directio scenarios we do not
702 * allow an operation to fall back to buffered mode.
703 */
704 ret = xfs_file_dio_aio_write(iocb, from);
705 if (ret == -EREMCHG)
706 goto buffered;
707 } else {
708buffered:
709 ret = xfs_file_buffered_aio_write(iocb, from);
710 }
711
712 if (ret > 0) {
713 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
714
715 /* Handle various SYNC-type writes */
716 ret = generic_write_sync(iocb, ret);
717 }
718 return ret;
719}
720
721#define XFS_FALLOC_FL_SUPPORTED \
722 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
723 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
724 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
725
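/*
 * fallocate() implementation: take the IOLOCK and MMAPLOCK exclusively,
 * break pNFS layouts, then dispatch to the hole punch, collapse, insert,
 * zero, unshare or preallocation helpers according to @mode.
 */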
726STATIC long
727xfs_file_fallocate(
728 struct file *file,
729 int mode,
730 loff_t offset,
731 loff_t len)
732{
733 struct inode *inode = file_inode(file);
734 struct xfs_inode *ip = XFS_I(inode);
735 long error;
736 enum xfs_prealloc_flags flags = 0;
737 uint iolock = XFS_IOLOCK_EXCL;
738 loff_t new_size = 0;
739 bool do_file_insert = false;
740
741 if (!S_ISREG(inode->i_mode))
742 return -EINVAL;
743 if (mode & ~XFS_FALLOC_FL_SUPPORTED)
744 return -EOPNOTSUPP;
745
746 xfs_ilock(ip, iolock);
747 error = xfs_break_layouts(inode, &iolock);
748 if (error)
749 goto out_unlock;
750
751 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
752 iolock |= XFS_MMAPLOCK_EXCL;
753
754 if (mode & FALLOC_FL_PUNCH_HOLE) {
755 error = xfs_free_file_space(ip, offset, len);
756 if (error)
757 goto out_unlock;
758 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
759 unsigned int blksize_mask = i_blocksize(inode) - 1;
760
761 if (offset & blksize_mask || len & blksize_mask) {
762 error = -EINVAL;
763 goto out_unlock;
764 }
765
766 /*
767	 * There is no need for a collapse range to overlap EOF;
768	 * in that case it would effectively be a truncate operation.
769 */
770 if (offset + len >= i_size_read(inode)) {
771 error = -EINVAL;
772 goto out_unlock;
773 }
774
775 new_size = i_size_read(inode) - len;
776
777 error = xfs_collapse_file_space(ip, offset, len);
778 if (error)
779 goto out_unlock;
780 } else if (mode & FALLOC_FL_INSERT_RANGE) {
781 unsigned int blksize_mask = i_blocksize(inode) - 1;
782 loff_t isize = i_size_read(inode);
783
784 if (offset & blksize_mask || len & blksize_mask) {
785 error = -EINVAL;
786 goto out_unlock;
787 }
788
789 /*
790 * New inode size must not exceed ->s_maxbytes, accounting for
791 * possible signed overflow.
792 */
793 if (inode->i_sb->s_maxbytes - isize < len) {
794 error = -EFBIG;
795 goto out_unlock;
796 }
797 new_size = isize + len;
798
799 /* Offset should be less than i_size */
800 if (offset >= isize) {
801 error = -EINVAL;
802 goto out_unlock;
803 }
804 do_file_insert = true;
805 } else {
806 flags |= XFS_PREALLOC_SET;
807
808 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
809 offset + len > i_size_read(inode)) {
810 new_size = offset + len;
811 error = inode_newsize_ok(inode, new_size);
812 if (error)
813 goto out_unlock;
814 }
815
816 if (mode & FALLOC_FL_ZERO_RANGE)
817 error = xfs_zero_file_space(ip, offset, len);
818 else {
819 if (mode & FALLOC_FL_UNSHARE_RANGE) {
820 error = xfs_reflink_unshare(ip, offset, len);
821 if (error)
822 goto out_unlock;
823 }
824 error = xfs_alloc_file_space(ip, offset, len,
825 XFS_BMAPI_PREALLOC);
826 }
827 if (error)
828 goto out_unlock;
829 }
830
831 if (file->f_flags & O_DSYNC)
832 flags |= XFS_PREALLOC_SYNC;
833
834 error = xfs_update_prealloc_flags(ip, flags);
835 if (error)
836 goto out_unlock;
837
838 /* Change file size if needed */
839 if (new_size) {
840 struct iattr iattr;
841
842 iattr.ia_valid = ATTR_SIZE;
843 iattr.ia_size = new_size;
844 error = xfs_vn_setattr_size(file_dentry(file), &iattr);
845 if (error)
846 goto out_unlock;
847 }
848
849 /*
850 * Perform hole insertion now that the file size has been
851 * updated so that if we crash during the operation we don't
852	 * leave shifted extents past EOF and hence lose access to
853 * the data that is contained within them.
854 */
855 if (do_file_insert)
856 error = xfs_insert_file_space(ip, offset, len);
857
858out_unlock:
859 xfs_iunlock(ip, iolock);
860 return error;
861}
862
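/* FICLONERANGE implementation: thin wrapper around the reflink remap code. */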
863STATIC int
864xfs_file_clone_range(
865 struct file *file_in,
866 loff_t pos_in,
867 struct file *file_out,
868 loff_t pos_out,
869 u64 len)
870{
871 return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
872 len, false);
873}
874
875STATIC ssize_t
876xfs_file_dedupe_range(
877 struct file *src_file,
878 u64 loff,
879 u64 len,
880 struct file *dst_file,
881 u64 dst_loff)
882{
883 struct inode *srci = file_inode(src_file);
884 u64 max_dedupe;
885 int error;
886
887 /*
888 * Since we have to read all these pages in to compare them, cut
889 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
890 * That means we won't do more than MAX_RW_COUNT IO per request.
891 */
892 max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
893 if (len > max_dedupe)
894 len = max_dedupe;
895 error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
896 len, true);
897 if (error)
898 return error;
899 return len;
900}
901
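/*
 * Reject large files opened without O_LARGEFILE, fail opens on a shut down
 * filesystem, and mark the file as supporting non-blocking (IOCB_NOWAIT) IO.
 */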
902STATIC int
903xfs_file_open(
904 struct inode *inode,
905 struct file *file)
906{
907 if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
908 return -EFBIG;
909 if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
910 return -EIO;
911 file->f_mode |= FMODE_NOWAIT;
912 return 0;
913}
914
915STATIC int
916xfs_dir_open(
917 struct inode *inode,
918 struct file *file)
919{
920 struct xfs_inode *ip = XFS_I(inode);
921 int mode;
922 int error;
923
924 error = xfs_file_open(inode, file);
925 if (error)
926 return error;
927
928 /*
929 * If there are any blocks, read-ahead block 0 as we're almost
930 * certain to have the next operation be a read there.
931 */
932 mode = xfs_ilock_data_map_shared(ip);
933 if (ip->i_d.di_nextents > 0)
934 error = xfs_dir3_data_readahead(ip, 0, -1);
935 xfs_iunlock(ip, mode);
936 return error;
937}
938
939STATIC int
940xfs_file_release(
941 struct inode *inode,
942 struct file *filp)
943{
944 return xfs_release(XFS_I(inode));
945}
946
947STATIC int
948xfs_file_readdir(
949 struct file *file,
950 struct dir_context *ctx)
951{
952 struct inode *inode = file_inode(file);
953 xfs_inode_t *ip = XFS_I(inode);
954 size_t bufsize;
955
956 /*
957	 * The Linux API doesn't pass the total size of the buffer
958 * we read into down to the filesystem. With the filldir concept
959 * it's not needed for correct information, but the XFS dir2 leaf
960	 * code wants an estimate of the buffer size to calculate its
961 * readahead window and size the buffers used for mapping to
962 * physical blocks.
963 *
964 * Try to give it an estimate that's good enough, maybe at some
965 * point we can change the ->readdir prototype to include the
966 * buffer size. For now we use the current glibc buffer size.
967 */
968 bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);
969
970 return xfs_readdir(NULL, ip, ctx, bufsize);
971}
972
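/*
 * llseek: SEEK_HOLE and SEEK_DATA are implemented on top of the iomap
 * extent iteration code; everything else goes through generic_file_llseek().
 */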
973STATIC loff_t
974xfs_file_llseek(
975 struct file *file,
976 loff_t offset,
977 int whence)
978{
979 struct inode *inode = file->f_mapping->host;
980
981 if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
982 return -EIO;
983
984 switch (whence) {
985 default:
986 return generic_file_llseek(file, offset, whence);
987 case SEEK_HOLE:
988 offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
989 break;
990 case SEEK_DATA:
991 offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
992 break;
993 }
994
995 if (offset < 0)
996 return offset;
997 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
998}
999
1000/*
1001 * Locking for serialisation of IO during page faults. This results in a lock
1002 * ordering of:
1003 *
1004 * mmap_sem (MM)
1005 * sb_start_pagefault(vfs, freeze)
1006 * i_mmaplock (XFS - truncate serialisation)
1007 * page_lock (MM)
1008 * i_lock (XFS - extent map serialisation)
1009 */
1010static int
1011__xfs_filemap_fault(
1012 struct vm_fault *vmf,
1013 enum page_entry_size pe_size,
1014 bool write_fault)
1015{
1016 struct inode *inode = file_inode(vmf->vma->vm_file);
1017 struct xfs_inode *ip = XFS_I(inode);
1018 int ret;
1019
1020 trace_xfs_filemap_fault(ip, pe_size, write_fault);
1021
1022 if (write_fault) {
1023 sb_start_pagefault(inode->i_sb);
1024 file_update_time(vmf->vma->vm_file);
1025 }
1026
1027 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1028 if (IS_DAX(inode)) {
1029 pfn_t pfn;
1030
1031 ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
1032 if (ret & VM_FAULT_NEEDDSYNC)
1033 ret = dax_finish_sync_fault(vmf, pe_size, pfn);
1034 } else {
1035 if (write_fault)
1036 ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
1037 else
1038 ret = filemap_fault(vmf);
1039 }
1040 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1041
1042 if (write_fault)
1043 sb_end_pagefault(inode->i_sb);
1044 return ret;
1045}
1046
1047static int
1048xfs_filemap_fault(
1049 struct vm_fault *vmf)
1050{
1051 /* DAX can shortcut the normal fault path on write faults! */
1052 return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
1053 IS_DAX(file_inode(vmf->vma->vm_file)) &&
1054 (vmf->flags & FAULT_FLAG_WRITE));
1055}
1056
1057static int
1058xfs_filemap_huge_fault(
1059 struct vm_fault *vmf,
1060 enum page_entry_size pe_size)
1061{
1062 if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1063 return VM_FAULT_FALLBACK;
1064
1065 /* DAX can shortcut the normal fault path on write faults! */
1066 return __xfs_filemap_fault(vmf, pe_size,
1067 (vmf->flags & FAULT_FLAG_WRITE));
1068}
1069
1070static int
1071xfs_filemap_page_mkwrite(
1072 struct vm_fault *vmf)
1073{
1074 return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1075}
1076
1077/*
1078 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1079 * on write faults. In reality, it needs to serialise against truncate and
1080	 * prepare memory for writing, so handle it as a standard write fault.
1081 */
1082static int
1083xfs_filemap_pfn_mkwrite(
1084 struct vm_fault *vmf)
1085{
1086
1087 return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1088}
1089
1090static const struct vm_operations_struct xfs_file_vm_ops = {
1091 .fault = xfs_filemap_fault,
1092 .huge_fault = xfs_filemap_huge_fault,
1093 .map_pages = filemap_map_pages,
1094 .page_mkwrite = xfs_filemap_page_mkwrite,
1095 .pfn_mkwrite = xfs_filemap_pfn_mkwrite,
1096};
1097
1098STATIC int
1099xfs_file_mmap(
1100 struct file *filp,
1101 struct vm_area_struct *vma)
1102{
1103 /*
1104 * We don't support synchronous mappings for non-DAX files. At least
1105	 * until someone comes up with a sensible use case.
1106 */
1107 if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
1108 return -EOPNOTSUPP;
1109
1110 file_accessed(filp);
1111 vma->vm_ops = &xfs_file_vm_ops;
1112 if (IS_DAX(file_inode(filp)))
1113 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1114 return 0;
1115}
1116
1117const struct file_operations xfs_file_operations = {
1118 .llseek = xfs_file_llseek,
1119 .read_iter = xfs_file_read_iter,
1120 .write_iter = xfs_file_write_iter,
1121 .splice_read = generic_file_splice_read,
1122 .splice_write = iter_file_splice_write,
1123 .unlocked_ioctl = xfs_file_ioctl,
1124#ifdef CONFIG_COMPAT
1125 .compat_ioctl = xfs_file_compat_ioctl,
1126#endif
1127 .mmap = xfs_file_mmap,
1128 .mmap_supported_flags = MAP_SYNC,
1129 .open = xfs_file_open,
1130 .release = xfs_file_release,
1131 .fsync = xfs_file_fsync,
1132 .get_unmapped_area = thp_get_unmapped_area,
1133 .fallocate = xfs_file_fallocate,
1134 .clone_file_range = xfs_file_clone_range,
1135 .dedupe_file_range = xfs_file_dedupe_range,
1136};
1137
1138const struct file_operations xfs_dir_file_operations = {
1139 .open = xfs_dir_open,
1140 .read = generic_read_dir,
1141 .iterate_shared = xfs_file_readdir,
1142 .llseek = generic_file_llseek,
1143 .unlocked_ioctl = xfs_file_ioctl,
1144#ifdef CONFIG_COMPAT
1145 .compat_ioctl = xfs_file_compat_ioctl,
1146#endif
1147 .fsync = xfs_dir_fsync,
1148};
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_mount.h"
25#include "xfs_da_format.h"
26#include "xfs_da_btree.h"
27#include "xfs_inode.h"
28#include "xfs_trans.h"
29#include "xfs_inode_item.h"
30#include "xfs_bmap.h"
31#include "xfs_bmap_util.h"
32#include "xfs_error.h"
33#include "xfs_dir2.h"
34#include "xfs_dir2_priv.h"
35#include "xfs_ioctl.h"
36#include "xfs_trace.h"
37#include "xfs_log.h"
38#include "xfs_icache.h"
39#include "xfs_pnfs.h"
40
41#include <linux/dcache.h>
42#include <linux/falloc.h>
43#include <linux/pagevec.h>
44#include <linux/backing-dev.h>
45
46static const struct vm_operations_struct xfs_file_vm_ops;
47
48/*
49 * Locking primitives for read and write IO paths to ensure we consistently use
50 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
51 */
52static inline void
53xfs_rw_ilock(
54 struct xfs_inode *ip,
55 int type)
56{
57 if (type & XFS_IOLOCK_EXCL)
58 inode_lock(VFS_I(ip));
59 xfs_ilock(ip, type);
60}
61
62static inline void
63xfs_rw_iunlock(
64 struct xfs_inode *ip,
65 int type)
66{
67 xfs_iunlock(ip, type);
68 if (type & XFS_IOLOCK_EXCL)
69 inode_unlock(VFS_I(ip));
70}
71
72static inline void
73xfs_rw_ilock_demote(
74 struct xfs_inode *ip,
75 int type)
76{
77 xfs_ilock_demote(ip, type);
78 if (type & XFS_IOLOCK_EXCL)
79 inode_unlock(VFS_I(ip));
80}
81
82/*
83 * xfs_iozero clears the specified range supplied via the page cache (except in
84 * the DAX case). Writes through the page cache will allocate blocks over holes,
85 * though the callers usually map the holes first and avoid them. If a block is
86 * not completely zeroed, then it will be read from disk before being partially
87 * zeroed.
88 *
89 * In the DAX case, we can just directly write to the underlying pages. This
90 * will not allocate blocks, but will avoid holes and unwritten extents and so
91 * not do unnecessary work.
92 */
93int
94xfs_iozero(
95 struct xfs_inode *ip, /* inode */
96 loff_t pos, /* offset in file */
97 size_t count) /* size of data to zero */
98{
99 struct page *page;
100 struct address_space *mapping;
101 int status = 0;
102
103
104 mapping = VFS_I(ip)->i_mapping;
105 do {
106 unsigned offset, bytes;
107 void *fsdata;
108
109 offset = (pos & (PAGE_SIZE -1)); /* Within page */
110 bytes = PAGE_SIZE - offset;
111 if (bytes > count)
112 bytes = count;
113
114 if (IS_DAX(VFS_I(ip))) {
115 status = dax_zero_page_range(VFS_I(ip), pos, bytes,
116 xfs_get_blocks_direct);
117 if (status)
118 break;
119 } else {
120 status = pagecache_write_begin(NULL, mapping, pos, bytes,
121 AOP_FLAG_UNINTERRUPTIBLE,
122 &page, &fsdata);
123 if (status)
124 break;
125
126 zero_user(page, offset, bytes);
127
128 status = pagecache_write_end(NULL, mapping, pos, bytes,
129 bytes, page, fsdata);
130 WARN_ON(status <= 0); /* can't return less than zero! */
131 status = 0;
132 }
133 pos += bytes;
134 count -= bytes;
135 } while (count);
136
137 return status;
138}
139
140int
141xfs_update_prealloc_flags(
142 struct xfs_inode *ip,
143 enum xfs_prealloc_flags flags)
144{
145 struct xfs_trans *tp;
146 int error;
147
148 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
149 error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
150 if (error) {
151 xfs_trans_cancel(tp);
152 return error;
153 }
154
155 xfs_ilock(ip, XFS_ILOCK_EXCL);
156 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
157
158 if (!(flags & XFS_PREALLOC_INVISIBLE)) {
159 VFS_I(ip)->i_mode &= ~S_ISUID;
160 if (VFS_I(ip)->i_mode & S_IXGRP)
161 VFS_I(ip)->i_mode &= ~S_ISGID;
162 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
163 }
164
165 if (flags & XFS_PREALLOC_SET)
166 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
167 if (flags & XFS_PREALLOC_CLEAR)
168 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
169
170 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
171 if (flags & XFS_PREALLOC_SYNC)
172 xfs_trans_set_sync(tp);
173 return xfs_trans_commit(tp);
174}
175
176/*
177 * Fsync operations on directories are much simpler than on regular files,
178 * as there is no file data to flush, and thus also no need for explicit
179 * cache flush operations, and there are no non-transaction metadata updates
180 * on directories either.
181 */
182STATIC int
183xfs_dir_fsync(
184 struct file *file,
185 loff_t start,
186 loff_t end,
187 int datasync)
188{
189 struct xfs_inode *ip = XFS_I(file->f_mapping->host);
190 struct xfs_mount *mp = ip->i_mount;
191 xfs_lsn_t lsn = 0;
192
193 trace_xfs_dir_fsync(ip);
194
195 xfs_ilock(ip, XFS_ILOCK_SHARED);
196 if (xfs_ipincount(ip))
197 lsn = ip->i_itemp->ili_last_lsn;
198 xfs_iunlock(ip, XFS_ILOCK_SHARED);
199
200 if (!lsn)
201 return 0;
202 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
203}
204
205STATIC int
206xfs_file_fsync(
207 struct file *file,
208 loff_t start,
209 loff_t end,
210 int datasync)
211{
212 struct inode *inode = file->f_mapping->host;
213 struct xfs_inode *ip = XFS_I(inode);
214 struct xfs_mount *mp = ip->i_mount;
215 int error = 0;
216 int log_flushed = 0;
217 xfs_lsn_t lsn = 0;
218
219 trace_xfs_file_fsync(ip);
220
221 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
222 if (error)
223 return error;
224
225 if (XFS_FORCED_SHUTDOWN(mp))
226 return -EIO;
227
228 xfs_iflags_clear(ip, XFS_ITRUNCATED);
229
230 if (mp->m_flags & XFS_MOUNT_BARRIER) {
231 /*
232 * If we have an RT and/or log subvolume we need to make sure
233 * to flush the write cache the device used for file data
234 * first. This is to ensure newly written file data make
235 * it to disk before logging the new inode size in case of
236 * an extending write.
237 */
238 if (XFS_IS_REALTIME_INODE(ip))
239 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
240 else if (mp->m_logdev_targp != mp->m_ddev_targp)
241 xfs_blkdev_issue_flush(mp->m_ddev_targp);
242 }
243
244 /*
245 * All metadata updates are logged, which means that we just have to
246 * flush the log up to the latest LSN that touched the inode. If we have
247 * concurrent fsync/fdatasync() calls, we need them to all block on the
248 * log force before we clear the ili_fsync_fields field. This ensures
249 * that we don't get a racing sync operation that does not wait for the
250 * metadata to hit the journal before returning. If we race with
251 * clearing the ili_fsync_fields, then all that will happen is the log
252 * force will do nothing as the lsn will already be on disk. We can't
253 * race with setting ili_fsync_fields because that is done under
254 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
255 * until after the ili_fsync_fields is cleared.
256 */
257 xfs_ilock(ip, XFS_ILOCK_SHARED);
258 if (xfs_ipincount(ip)) {
259 if (!datasync ||
260 (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
261 lsn = ip->i_itemp->ili_last_lsn;
262 }
263
264 if (lsn) {
265 error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
266 ip->i_itemp->ili_fsync_fields = 0;
267 }
268 xfs_iunlock(ip, XFS_ILOCK_SHARED);
269
270 /*
271 * If we only have a single device, and the log force about was
272 * a no-op we might have to flush the data device cache here.
273 * This can only happen for fdatasync/O_DSYNC if we were overwriting
274 * an already allocated file and thus do not have any metadata to
275 * commit.
276 */
277 if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
278 mp->m_logdev_targp == mp->m_ddev_targp &&
279 !XFS_IS_REALTIME_INODE(ip) &&
280 !log_flushed)
281 xfs_blkdev_issue_flush(mp->m_ddev_targp);
282
283 return error;
284}
285
286STATIC ssize_t
287xfs_file_read_iter(
288 struct kiocb *iocb,
289 struct iov_iter *to)
290{
291 struct file *file = iocb->ki_filp;
292 struct inode *inode = file->f_mapping->host;
293 struct xfs_inode *ip = XFS_I(inode);
294 struct xfs_mount *mp = ip->i_mount;
295 size_t size = iov_iter_count(to);
296 ssize_t ret = 0;
297 int ioflags = 0;
298 xfs_fsize_t n;
299 loff_t pos = iocb->ki_pos;
300
301 XFS_STATS_INC(mp, xs_read_calls);
302
303 if (unlikely(iocb->ki_flags & IOCB_DIRECT))
304 ioflags |= XFS_IO_ISDIRECT;
305 if (file->f_mode & FMODE_NOCMTIME)
306 ioflags |= XFS_IO_INVIS;
307
308 if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
309 xfs_buftarg_t *target =
310 XFS_IS_REALTIME_INODE(ip) ?
311 mp->m_rtdev_targp : mp->m_ddev_targp;
312 /* DIO must be aligned to device logical sector size */
313 if ((pos | size) & target->bt_logical_sectormask) {
314 if (pos == i_size_read(inode))
315 return 0;
316 return -EINVAL;
317 }
318 }
319
320 n = mp->m_super->s_maxbytes - pos;
321 if (n <= 0 || size == 0)
322 return 0;
323
324 if (n < size)
325 size = n;
326
327 if (XFS_FORCED_SHUTDOWN(mp))
328 return -EIO;
329
330 /*
331 * Locking is a bit tricky here. If we take an exclusive lock for direct
332 * IO, we effectively serialise all new concurrent read IO to this file
333 * and block it behind IO that is currently in progress because IO in
334 * progress holds the IO lock shared. We only need to hold the lock
335 * exclusive to blow away the page cache, so only take lock exclusively
336 * if the page cache needs invalidation. This allows the normal direct
337 * IO case of no page cache pages to proceeed concurrently without
338 * serialisation.
339 */
340 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
341 if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
342 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
343 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
344
345 /*
346 * The generic dio code only flushes the range of the particular
347 * I/O. Because we take an exclusive lock here, this whole
348 * sequence is considerably more expensive for us. This has a
349 * noticeable performance impact for any file with cached pages,
350 * even when outside of the range of the particular I/O.
351 *
352 * Hence, amortize the cost of the lock against a full file
353 * flush and reduce the chances of repeated iolock cycles going
354 * forward.
355 */
356 if (inode->i_mapping->nrpages) {
357 ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
358 if (ret) {
359 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
360 return ret;
361 }
362
363 /*
364 * Invalidate whole pages. This can return an error if
365 * we fail to invalidate a page, but this should never
366 * happen on XFS. Warn if it does fail.
367 */
368 ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
369 WARN_ON_ONCE(ret);
370 ret = 0;
371 }
372 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
373 }
374
375 trace_xfs_file_read(ip, size, pos, ioflags);
376
377 ret = generic_file_read_iter(iocb, to);
378 if (ret > 0)
379 XFS_STATS_ADD(mp, xs_read_bytes, ret);
380
381 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
382 return ret;
383}
384
385STATIC ssize_t
386xfs_file_splice_read(
387 struct file *infilp,
388 loff_t *ppos,
389 struct pipe_inode_info *pipe,
390 size_t count,
391 unsigned int flags)
392{
393 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
394 int ioflags = 0;
395 ssize_t ret;
396
397 XFS_STATS_INC(ip->i_mount, xs_read_calls);
398
399 if (infilp->f_mode & FMODE_NOCMTIME)
400 ioflags |= XFS_IO_INVIS;
401
402 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
403 return -EIO;
404
405 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
406
407 /*
408 * DAX inodes cannot ues the page cache for splice, so we have to push
409 * them through the VFS IO path. This means it goes through
410 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
411 * cannot lock the splice operation at this level for DAX inodes.
412 */
413 if (IS_DAX(VFS_I(ip))) {
414 ret = default_file_splice_read(infilp, ppos, pipe, count,
415 flags);
416 goto out;
417 }
418
419 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
420 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
421 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
422out:
423 if (ret > 0)
424 XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
425 return ret;
426}
427
428/*
429 * This routine is called to handle zeroing any space in the last block of the
430 * file that is beyond the EOF. We do this since the size is being increased
431 * without writing anything to that block and we don't want to read the
432 * garbage on the disk.
433 */
434STATIC int /* error (positive) */
435xfs_zero_last_block(
436 struct xfs_inode *ip,
437 xfs_fsize_t offset,
438 xfs_fsize_t isize,
439 bool *did_zeroing)
440{
441 struct xfs_mount *mp = ip->i_mount;
442 xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize);
443 int zero_offset = XFS_B_FSB_OFFSET(mp, isize);
444 int zero_len;
445 int nimaps = 1;
446 int error = 0;
447 struct xfs_bmbt_irec imap;
448
449 xfs_ilock(ip, XFS_ILOCK_EXCL);
450 error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
451 xfs_iunlock(ip, XFS_ILOCK_EXCL);
452 if (error)
453 return error;
454
455 ASSERT(nimaps > 0);
456
457 /*
458 * If the block underlying isize is just a hole, then there
459 * is nothing to zero.
460 */
461 if (imap.br_startblock == HOLESTARTBLOCK)
462 return 0;
463
464 zero_len = mp->m_sb.sb_blocksize - zero_offset;
465 if (isize + zero_len > offset)
466 zero_len = offset - isize;
467 *did_zeroing = true;
468 return xfs_iozero(ip, isize, zero_len);
469}
470
471/*
472 * Zero any on disk space between the current EOF and the new, larger EOF.
473 *
474 * This handles the normal case of zeroing the remainder of the last block in
475 * the file and the unusual case of zeroing blocks out beyond the size of the
476 * file. This second case only happens with fixed size extents and when the
477 * system crashes before the inode size was updated but after blocks were
478 * allocated.
479 *
480 * Expects the iolock to be held exclusive, and will take the ilock internally.
481 */
482int /* error (positive) */
483xfs_zero_eof(
484 struct xfs_inode *ip,
485 xfs_off_t offset, /* starting I/O offset */
486 xfs_fsize_t isize, /* current inode size */
487 bool *did_zeroing)
488{
489 struct xfs_mount *mp = ip->i_mount;
490 xfs_fileoff_t start_zero_fsb;
491 xfs_fileoff_t end_zero_fsb;
492 xfs_fileoff_t zero_count_fsb;
493 xfs_fileoff_t last_fsb;
494 xfs_fileoff_t zero_off;
495 xfs_fsize_t zero_len;
496 int nimaps;
497 int error = 0;
498 struct xfs_bmbt_irec imap;
499
500 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
501 ASSERT(offset > isize);
502
503 trace_xfs_zero_eof(ip, isize, offset - isize);
504
505 /*
506 * First handle zeroing the block on which isize resides.
507 *
508 * We only zero a part of that block so it is handled specially.
509 */
510 if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
511 error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
512 if (error)
513 return error;
514 }
515
516 /*
517 * Calculate the range between the new size and the old where blocks
518 * needing to be zeroed may exist.
519 *
520 * To get the block where the last byte in the file currently resides,
521 * we need to subtract one from the size and truncate back to a block
522 * boundary. We subtract 1 in case the size is exactly on a block
523 * boundary.
524 */
525 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
526 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
527 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
528 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
529 if (last_fsb == end_zero_fsb) {
530 /*
531 * The size was only incremented on its last block.
532 * We took care of that above, so just return.
533 */
534 return 0;
535 }
536
537 ASSERT(start_zero_fsb <= end_zero_fsb);
538 while (start_zero_fsb <= end_zero_fsb) {
539 nimaps = 1;
540 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
541
542 xfs_ilock(ip, XFS_ILOCK_EXCL);
543 error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
544 &imap, &nimaps, 0);
545 xfs_iunlock(ip, XFS_ILOCK_EXCL);
546 if (error)
547 return error;
548
549 ASSERT(nimaps > 0);
550
551 if (imap.br_state == XFS_EXT_UNWRITTEN ||
552 imap.br_startblock == HOLESTARTBLOCK) {
553 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
554 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
555 continue;
556 }
557
558 /*
559 * There are blocks we need to zero.
560 */
561 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
562 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
563
564 if ((zero_off + zero_len) > offset)
565 zero_len = offset - zero_off;
566
567 error = xfs_iozero(ip, zero_off, zero_len);
568 if (error)
569 return error;
570
571 *did_zeroing = true;
572 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
573 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
574 }
575
576 return 0;
577}
578
579/*
580 * Common pre-write limit and setup checks.
581 *
582 * Called with the iolocked held either shared and exclusive according to
583 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
584 * if called for a direct write beyond i_size.
585 */
586STATIC ssize_t
587xfs_file_aio_write_checks(
588 struct kiocb *iocb,
589 struct iov_iter *from,
590 int *iolock)
591{
592 struct file *file = iocb->ki_filp;
593 struct inode *inode = file->f_mapping->host;
594 struct xfs_inode *ip = XFS_I(inode);
595 ssize_t error = 0;
596 size_t count = iov_iter_count(from);
597 bool drained_dio = false;
598
599restart:
600 error = generic_write_checks(iocb, from);
601 if (error <= 0)
602 return error;
603
604 error = xfs_break_layouts(inode, iolock, true);
605 if (error)
606 return error;
607
608 /* For changing security info in file_remove_privs() we need i_mutex */
609 if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
610 xfs_rw_iunlock(ip, *iolock);
611 *iolock = XFS_IOLOCK_EXCL;
612 xfs_rw_ilock(ip, *iolock);
613 goto restart;
614 }
615 /*
616 * If the offset is beyond the size of the file, we need to zero any
617 * blocks that fall between the existing EOF and the start of this
618 * write. If zeroing is needed and we are currently holding the
619 * iolock shared, we need to update it to exclusive which implies
620 * having to redo all checks before.
621 *
622 * We need to serialise against EOF updates that occur in IO
623 * completions here. We want to make sure that nobody is changing the
624 * size while we do this check until we have placed an IO barrier (i.e.
625 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
626 * The spinlock effectively forms a memory barrier once we have the
627 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
628 * and hence be able to correctly determine if we need to run zeroing.
629 */
630 spin_lock(&ip->i_flags_lock);
631 if (iocb->ki_pos > i_size_read(inode)) {
632 bool zero = false;
633
634 spin_unlock(&ip->i_flags_lock);
635 if (!drained_dio) {
636 if (*iolock == XFS_IOLOCK_SHARED) {
637 xfs_rw_iunlock(ip, *iolock);
638 *iolock = XFS_IOLOCK_EXCL;
639 xfs_rw_ilock(ip, *iolock);
640 iov_iter_reexpand(from, count);
641 }
642 /*
643 * We now have an IO submission barrier in place, but
644 * AIO can do EOF updates during IO completion and hence
645 * we now need to wait for all of them to drain. Non-AIO
646 * DIO will have drained before we are given the
647 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
648 * no-op.
649 */
650 inode_dio_wait(inode);
651 drained_dio = true;
652 goto restart;
653 }
654 error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
655 if (error)
656 return error;
657 } else
658 spin_unlock(&ip->i_flags_lock);
659
660 /*
661 * Updating the timestamps will grab the ilock again from
662 * xfs_fs_dirty_inode, so we have to call it after dropping the
663 * lock above. Eventually we should look into a way to avoid
664 * the pointless lock roundtrip.
665 */
666 if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
667 error = file_update_time(file);
668 if (error)
669 return error;
670 }
671
672 /*
673 * If we're writing the file then make sure to clear the setuid and
674 * setgid bits if the process is not being run by root. This keeps
675 * people from modifying setuid and setgid binaries.
676 */
677 if (!IS_NOSEC(inode))
678 return file_remove_privs(file);
679 return 0;
680}
681
682/*
683 * xfs_file_dio_aio_write - handle direct IO writes
684 *
685 * Lock the inode appropriately to prepare for and issue a direct IO write.
686 * By separating it from the buffered write path we remove all the tricky to
687 * follow locking changes and looping.
688 *
689 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
690 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
691 * pages are flushed out.
692 *
693 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
694 * allowing them to be done in parallel with reads and other direct IO writes.
695 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
696 * needs to do sub-block zeroing and that requires serialisation against other
697 * direct IOs to the same block. In this case we need to serialise the
698 * submission of the unaligned IOs so that we don't get racing block zeroing in
699 * the dio layer. To avoid the problem with aio, we also need to wait for
700 * outstanding IOs to complete so that unwritten extent conversion is completed
701 * before we try to map the overlapping block. This is currently implemented by
702 * hitting it with a big hammer (i.e. inode_dio_wait()).
703 *
704 * Returns with locks held indicated by @iolock and errors indicated by
705 * negative return values.
706 */
707STATIC ssize_t
708xfs_file_dio_aio_write(
709 struct kiocb *iocb,
710 struct iov_iter *from)
711{
712 struct file *file = iocb->ki_filp;
713 struct address_space *mapping = file->f_mapping;
714 struct inode *inode = mapping->host;
715 struct xfs_inode *ip = XFS_I(inode);
716 struct xfs_mount *mp = ip->i_mount;
717 ssize_t ret = 0;
718 int unaligned_io = 0;
719 int iolock;
720 size_t count = iov_iter_count(from);
721 loff_t pos = iocb->ki_pos;
722 loff_t end;
723 struct iov_iter data;
724 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
725 mp->m_rtdev_targp : mp->m_ddev_targp;
726
727 /* DIO must be aligned to device logical sector size */
728 if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
729 return -EINVAL;
730
731 /* "unaligned" here means not aligned to a filesystem block */
732 if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
733 unaligned_io = 1;
734
735 /*
736 * We don't need to take an exclusive lock unless there page cache needs
737 * to be invalidated or unaligned IO is being executed. We don't need to
738 * consider the EOF extension case here because
739 * xfs_file_aio_write_checks() will relock the inode as necessary for
740 * EOF zeroing cases and fill out the new inode size as appropriate.
741 */
742 if (unaligned_io || mapping->nrpages)
743 iolock = XFS_IOLOCK_EXCL;
744 else
745 iolock = XFS_IOLOCK_SHARED;
746 xfs_rw_ilock(ip, iolock);
747
748 /*
749 * Recheck if there are cached pages that need invalidate after we got
750 * the iolock to protect against other threads adding new pages while
751 * we were waiting for the iolock.
752 */
753 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
754 xfs_rw_iunlock(ip, iolock);
755 iolock = XFS_IOLOCK_EXCL;
756 xfs_rw_ilock(ip, iolock);
757 }
758
759 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
760 if (ret)
761 goto out;
762 count = iov_iter_count(from);
763 pos = iocb->ki_pos;
764 end = pos + count - 1;
765
766 /*
767 * See xfs_file_read_iter() for why we do a full-file flush here.
768 */
769 if (mapping->nrpages) {
770 ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
771 if (ret)
772 goto out;
773 /*
774 * Invalidate whole pages. This can return an error if we fail
775 * to invalidate a page, but this should never happen on XFS.
776 * Warn if it does fail.
777 */
778 ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
779 WARN_ON_ONCE(ret);
780 ret = 0;
781 }
782
783 /*
784 * If we are doing unaligned IO, wait for all other IO to drain;
785 * otherwise demote the lock if we had to flush cached pages.
786 */
787 if (unaligned_io)
788 inode_dio_wait(inode);
789 else if (iolock == XFS_IOLOCK_EXCL) {
790 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
791 iolock = XFS_IOLOCK_SHARED;
792 }
793
794 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
795
796 data = *from;
797 ret = mapping->a_ops->direct_IO(iocb, &data, pos);
798
799 /* see generic_file_direct_write() for why this is necessary */
800 if (mapping->nrpages) {
801 invalidate_inode_pages2_range(mapping,
802 pos >> PAGE_SHIFT,
803 end >> PAGE_SHIFT);
804 }
805
806 if (ret > 0) {
807 pos += ret;
808 iov_iter_advance(from, ret);
809 iocb->ki_pos = pos;
810 }
811out:
812 xfs_rw_iunlock(ip, iolock);
813
814 /*
815 * No fallback to buffered IO on errors for XFS. DAX can result in
816 * partial writes, but direct IO will either complete fully or fail.
817 */
818 ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
819 return ret;
820}
821
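/*
 * Illustrative user-space sketch of the aligned direct IO case handled by
 * xfs_file_dio_aio_write() above; the path, the 4096-byte block size and the
 * pre-existing, at-least-one-block-long file are assumptions for the example.
 * A direct write that is aligned to the filesystem block size, does not
 * extend the file, and finds no cached pages over the range is issued under
 * IOLOCK_SHARED and can run concurrently with reads and other direct IO; an
 * unaligned write takes IOLOCK_EXCL and waits for in-flight direct IO instead.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		ssize_t ret;
 *		int fd = open("/mnt/xfs/testfile", O_WRONLY | O_DIRECT);
 *
 *		// O_DIRECT requires sector alignment; aligning the buffer,
 *		// offset and length to the 4096-byte filesystem block also
 *		// avoids sub-block zeroing, so the shared iolock suffices.
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *			return 1;
 *		memset(buf, 0xab, 4096);
 *		ret = pwrite(fd, buf, 4096, 0);
 *		free(buf);
 *		close(fd);
 *		return ret == 4096 ? 0 : 1;
 *	}
 */
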
822STATIC ssize_t
823xfs_file_buffered_aio_write(
824 struct kiocb *iocb,
825 struct iov_iter *from)
826{
827 struct file *file = iocb->ki_filp;
828 struct address_space *mapping = file->f_mapping;
829 struct inode *inode = mapping->host;
830 struct xfs_inode *ip = XFS_I(inode);
831 ssize_t ret;
832 int enospc = 0;
833 int iolock = XFS_IOLOCK_EXCL;
834
835 xfs_rw_ilock(ip, iolock);
836
837 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
838 if (ret)
839 goto out;
840
841 /* We can write back this queue in page reclaim */
842 current->backing_dev_info = inode_to_bdi(inode);
843
844write_retry:
845 trace_xfs_file_buffered_write(ip, iov_iter_count(from),
846 iocb->ki_pos, 0);
847 ret = generic_perform_write(file, from, iocb->ki_pos);
848 if (likely(ret >= 0))
849 iocb->ki_pos += ret;
850
851 /*
852 * If we hit a space limit, try to free up some lingering preallocated
853 * space before returning an error. In the case of ENOSPC, first try to
854 * write back all dirty inodes to free up some of the excess reserved
855 * metadata space. This reduces the chances that the eofblocks scan
856 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
857 * also behaves as a filter to prevent too many eofblocks scans from
858 * running at the same time.
859 */
860 if (ret == -EDQUOT && !enospc) {
861 enospc = xfs_inode_free_quota_eofblocks(ip);
862 if (enospc)
863 goto write_retry;
864 } else if (ret == -ENOSPC && !enospc) {
865 struct xfs_eofblocks eofb = {0};
866
867 enospc = 1;
868 xfs_flush_inodes(ip->i_mount);
869 eofb.eof_scan_owner = ip->i_ino; /* for locking */
870 eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
871 xfs_icache_free_eofblocks(ip->i_mount, &eofb);
872 goto write_retry;
873 }
874
875 current->backing_dev_info = NULL;
876out:
877 xfs_rw_iunlock(ip, iolock);
878 return ret;
879}
880
881STATIC ssize_t
882xfs_file_write_iter(
883 struct kiocb *iocb,
884 struct iov_iter *from)
885{
886 struct file *file = iocb->ki_filp;
887 struct address_space *mapping = file->f_mapping;
888 struct inode *inode = mapping->host;
889 struct xfs_inode *ip = XFS_I(inode);
890 ssize_t ret;
891 size_t ocount = iov_iter_count(from);
892
893 XFS_STATS_INC(ip->i_mount, xs_write_calls);
894
895 if (ocount == 0)
896 return 0;
897
898 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
899 return -EIO;
900
901 if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
902 ret = xfs_file_dio_aio_write(iocb, from);
903 else
904 ret = xfs_file_buffered_aio_write(iocb, from);
905
906 if (ret > 0) {
907 ssize_t err;
908
909 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
910
911 /* Handle various SYNC-type writes */
912 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
913 if (err < 0)
914 ret = err;
915 }
916 return ret;
917}
918
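/*
 * Illustrative user-space sketch (the path is an assumption): when a file is
 * opened with O_DSYNC, the generic_write_sync() call in xfs_file_write_iter()
 * above flushes the just-written range, and any metadata needed to retrieve
 * it, before the write returns to the caller.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static const char rec[] = "record\n";
 *		int fd = open("/mnt/xfs/journal.dat",
 *			      O_WRONLY | O_CREAT | O_APPEND | O_DSYNC, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Returns only once the appended record is stable on disk.
 *		if (write(fd, rec, strlen(rec)) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */
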
919#define XFS_FALLOC_FL_SUPPORTED \
920 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
921 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
922 FALLOC_FL_INSERT_RANGE)
923
924STATIC long
925xfs_file_fallocate(
926 struct file *file,
927 int mode,
928 loff_t offset,
929 loff_t len)
930{
931 struct inode *inode = file_inode(file);
932 struct xfs_inode *ip = XFS_I(inode);
933 long error;
934 enum xfs_prealloc_flags flags = 0;
935 uint iolock = XFS_IOLOCK_EXCL;
936 loff_t new_size = 0;
937 bool do_file_insert = false;
938
939 if (!S_ISREG(inode->i_mode))
940 return -EINVAL;
941 if (mode & ~XFS_FALLOC_FL_SUPPORTED)
942 return -EOPNOTSUPP;
943
944 xfs_ilock(ip, iolock);
945 error = xfs_break_layouts(inode, &iolock, false);
946 if (error)
947 goto out_unlock;
948
949 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
950 iolock |= XFS_MMAPLOCK_EXCL;
951
952 if (mode & FALLOC_FL_PUNCH_HOLE) {
953 error = xfs_free_file_space(ip, offset, len);
954 if (error)
955 goto out_unlock;
956 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
957 unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
958
959 if (offset & blksize_mask || len & blksize_mask) {
960 error = -EINVAL;
961 goto out_unlock;
962 }
963
964 /*
965 * There is no need for a collapse range to overlap EOF;
966 * in that case it is effectively a truncate operation.
967 */
968 if (offset + len >= i_size_read(inode)) {
969 error = -EINVAL;
970 goto out_unlock;
971 }
972
973 new_size = i_size_read(inode) - len;
974
975 error = xfs_collapse_file_space(ip, offset, len);
976 if (error)
977 goto out_unlock;
978 } else if (mode & FALLOC_FL_INSERT_RANGE) {
979 unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
980
981 new_size = i_size_read(inode) + len;
982 if (offset & blksize_mask || len & blksize_mask) {
983 error = -EINVAL;
984 goto out_unlock;
985 }
986
987 /* check the new inode size does not exceed the maximum file size */
988 if (new_size > inode->i_sb->s_maxbytes) {
989 error = -EFBIG;
990 goto out_unlock;
991 }
992
993 /* Offset should be less than i_size */
994 if (offset >= i_size_read(inode)) {
995 error = -EINVAL;
996 goto out_unlock;
997 }
998 do_file_insert = true;
999 } else {
1000 flags |= XFS_PREALLOC_SET;
1001
1002 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1003 offset + len > i_size_read(inode)) {
1004 new_size = offset + len;
1005 error = inode_newsize_ok(inode, new_size);
1006 if (error)
1007 goto out_unlock;
1008 }
1009
1010 if (mode & FALLOC_FL_ZERO_RANGE)
1011 error = xfs_zero_file_space(ip, offset, len);
1012 else
1013 error = xfs_alloc_file_space(ip, offset, len,
1014 XFS_BMAPI_PREALLOC);
1015 if (error)
1016 goto out_unlock;
1017 }
1018
1019 if (file->f_flags & O_DSYNC)
1020 flags |= XFS_PREALLOC_SYNC;
1021
1022 error = xfs_update_prealloc_flags(ip, flags);
1023 if (error)
1024 goto out_unlock;
1025
1026 /* Change file size if needed */
1027 if (new_size) {
1028 struct iattr iattr;
1029
1030 iattr.ia_valid = ATTR_SIZE;
1031 iattr.ia_size = new_size;
1032 error = xfs_setattr_size(ip, &iattr);
1033 if (error)
1034 goto out_unlock;
1035 }
1036
1037 /*
1038 * Perform hole insertion now that the file size has been
1039 * updated so that if we crash during the operation we don't
1040 * leave shifted extents past EOF and hence lose access to
1041 * the data contained within them.
1042 */
1043 if (do_file_insert)
1044 error = xfs_insert_file_space(ip, offset, len);
1045
1046out_unlock:
1047 xfs_iunlock(ip, iolock);
1048 return error;
1049}
1050
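/*
 * Illustrative user-space sketch of the fallocate modes accepted above; the
 * path and sizes are assumptions, and the file is assumed to be larger than
 * the ranges used. Punch hole accepts arbitrary byte ranges, while collapse
 * and insert range must be aligned to the filesystem block size (4096 bytes
 * here) and lie entirely below EOF, otherwise xfs_file_fallocate() returns
 * -EINVAL.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/xfs/bigfile", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Any byte range may be punched; the VFS requires KEEP_SIZE
 *		// to be set together with PUNCH_HOLE.
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  100, 1000);
 *		// Block-aligned offset and length, ending before EOF.
 *		fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192);
 *		close(fd);
 *		return 0;
 *	}
 */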
1051
1052STATIC int
1053xfs_file_open(
1054 struct inode *inode,
1055 struct file *file)
1056{
1057 if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1058 return -EFBIG;
1059 if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
1060 return -EIO;
1061 return 0;
1062}
1063
1064STATIC int
1065xfs_dir_open(
1066 struct inode *inode,
1067 struct file *file)
1068{
1069 struct xfs_inode *ip = XFS_I(inode);
1070 int mode;
1071 int error;
1072
1073 error = xfs_file_open(inode, file);
1074 if (error)
1075 return error;
1076
1077 /*
1078 * If there are any blocks, read-ahead block 0 as we're almost
1079 * certain to have the next operation be a read there.
1080 */
1081 mode = xfs_ilock_data_map_shared(ip);
1082 if (ip->i_d.di_nextents > 0)
1083 xfs_dir3_data_readahead(ip, 0, -1);
1084 xfs_iunlock(ip, mode);
1085 return 0;
1086}
1087
1088STATIC int
1089xfs_file_release(
1090 struct inode *inode,
1091 struct file *filp)
1092{
1093 return xfs_release(XFS_I(inode));
1094}
1095
1096STATIC int
1097xfs_file_readdir(
1098 struct file *file,
1099 struct dir_context *ctx)
1100{
1101 struct inode *inode = file_inode(file);
1102 xfs_inode_t *ip = XFS_I(inode);
1103 size_t bufsize;
1104
1105 /*
1106 * The Linux API doesn't pass the total size of the buffer we
1107 * read into down to the filesystem. With the filldir concept
1108 * it's not needed for correct information, but the XFS dir2 leaf
1109 * code wants an estimate of the buffer size to calculate its
1110 * readahead window and size the buffers used for mapping to
1111 * physical blocks.
1112 *
1113 * Try to give it an estimate that's good enough, maybe at some
1114 * point we can change the ->readdir prototype to include the
1115 * buffer size. For now we use the current glibc buffer size.
1116 */
1117 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
1118
1119 return xfs_readdir(ip, ctx, bufsize);
1120}
1121
1122/*
1123 * This enum indicates the type of offset we would like to search
1124 * the page cache for in xfs_seek_hole_data().
1125 */
1126enum {
1127 HOLE_OFF = 0,
1128 DATA_OFF,
1129};
1130
1131/*
1132 * Lookup the desired type of offset from the given page.
1133 *
1134 * On success, return true and the offset argument will point to the
1135 * start of the region that was found. Otherwise this function will
1136 * return false and keep the offset argument unchanged.
1137 */
1138STATIC bool
1139xfs_lookup_buffer_offset(
1140 struct page *page,
1141 loff_t *offset,
1142 unsigned int type)
1143{
1144 loff_t lastoff = page_offset(page);
1145 bool found = false;
1146 struct buffer_head *bh, *head;
1147
1148 bh = head = page_buffers(page);
1149 do {
1150 /*
1151 * Unwritten extents that have data in the page
1152 * cache covering them can be identified by the
1153 * BH_Unwritten state flag. Pages with multiple
1154 * buffers might have a mix of holes, data and
1155 * unwritten extents - any buffer with valid
1156 * data in it should have BH_Uptodate flag set
1157 * on it.
1158 */
1159 if (buffer_unwritten(bh) ||
1160 buffer_uptodate(bh)) {
1161 if (type == DATA_OFF)
1162 found = true;
1163 } else {
1164 if (type == HOLE_OFF)
1165 found = true;
1166 }
1167
1168 if (found) {
1169 *offset = lastoff;
1170 break;
1171 }
1172 lastoff += bh->b_size;
1173 } while ((bh = bh->b_this_page) != head);
1174
1175 return found;
1176}
1177
1178/*
1179 * This routine finds and returns a data or hole offset from the page
1180 * cache for unwritten extents, according to the type desired by
1181 * xfs_seek_hole_data().
1182 *
1183 * The offset argument tells us where to start searching the page cache.
1184 * The map argument is used to figure out the end points of the range in
1185 * which to look up pages.
1186 *
1187 * Return true if the desired type of offset was found, and the argument
1188 * offset is filled with that address. Otherwise, return false and keep
1189 * offset unchanged.
1190 */
1191STATIC bool
1192xfs_find_get_desired_pgoff(
1193 struct inode *inode,
1194 struct xfs_bmbt_irec *map,
1195 unsigned int type,
1196 loff_t *offset)
1197{
1198 struct xfs_inode *ip = XFS_I(inode);
1199 struct xfs_mount *mp = ip->i_mount;
1200 struct pagevec pvec;
1201 pgoff_t index;
1202 pgoff_t end;
1203 loff_t endoff;
1204 loff_t startoff = *offset;
1205 loff_t lastoff = startoff;
1206 bool found = false;
1207
1208 pagevec_init(&pvec, 0);
1209
1210 index = startoff >> PAGE_SHIFT;
1211 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1212 end = endoff >> PAGE_SHIFT;
1213 do {
1214 int want;
1215 unsigned nr_pages;
1216 unsigned int i;
1217
1218 want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
1219 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1220 want);
1221 /*
1222 * No page was mapped into the given range. If we are searching
1223 * for holes and this is the first pass through the loop, the
1224 * given offset lands in a hole, so return it.
1225 *
1226 * Otherwise we have already stepped through some pages looking
1227 * for holes, but they all contained data. In that case the last
1228 * offset has already been updated to point at the end of the
1229 * last mapped page; if it has not reached the end of the search
1230 * range, there must be a hole between them.
1231 */
1232 if (nr_pages == 0) {
1233 /* Data search found nothing */
1234 if (type == DATA_OFF)
1235 break;
1236
1237 ASSERT(type == HOLE_OFF);
1238 if (lastoff == startoff || lastoff < endoff) {
1239 found = true;
1240 *offset = lastoff;
1241 }
1242 break;
1243 }
1244
1245 /*
1246 * At least one page was found. If this is the first pass
1247 * through the loop and the first page's index offset is
1248 * greater than the given search offset, we have found a hole.
1249 */
1250 if (type == HOLE_OFF && lastoff == startoff &&
1251 lastoff < page_offset(pvec.pages[0])) {
1252 found = true;
1253 break;
1254 }
1255
1256 for (i = 0; i < nr_pages; i++) {
1257 struct page *page = pvec.pages[i];
1258 loff_t b_offset;
1259
1260 /*
1261 * At this point, the page may be truncated or
1262 * invalidated (changing page->mapping to NULL),
1263 * or even swizzled back from swapper_space to tmpfs
1264 * file mapping. However, page->index will not change
1265 * because we have a reference on the page.
1266 *
1267 * The search is done if the page index is out of range.
1268 * If the current offset has not reached the end of the
1269 * specified search range, there should be a hole
1270 * between them.
1271 */
1272 if (page->index > end) {
1273 if (type == HOLE_OFF && lastoff < endoff) {
1274 *offset = lastoff;
1275 found = true;
1276 }
1277 goto out;
1278 }
1279
1280 lock_page(page);
1281 /*
1282 * Page truncated or invalidated (page->mapping == NULL).
1283 * We can freely skip it and proceed to check the next
1284 * page.
1285 */
1286 if (unlikely(page->mapping != inode->i_mapping)) {
1287 unlock_page(page);
1288 continue;
1289 }
1290
1291 if (!page_has_buffers(page)) {
1292 unlock_page(page);
1293 continue;
1294 }
1295
1296 found = xfs_lookup_buffer_offset(page, &b_offset, type);
1297 if (found) {
1298 /*
1299 * The found offset may be less than the starting
1300 * point of the search if this is the first pass
1301 * through the loop.
1302 */
1303 *offset = max_t(loff_t, startoff, b_offset);
1304 unlock_page(page);
1305 goto out;
1306 }
1307
1308 /*
1309 * Either we were searching for data and found nothing, or
1310 * we were searching for a hole and found a data buffer. In
1311 * either case, the next page probably contains what we want,
1312 * so update the last offset accordingly.
1313 */
1314 lastoff = page_offset(page) + PAGE_SIZE;
1315 unlock_page(page);
1316 }
1317
1318 /*
1319 * Fewer pages were returned than we asked for, so the search
1320 * is done. A data search found nothing, but for a hole search
1321 * there is a hole behind the last offset.
1322 */
1323 if (nr_pages < want) {
1324 if (type == HOLE_OFF) {
1325 *offset = lastoff;
1326 found = true;
1327 }
1328 break;
1329 }
1330
1331 index = pvec.pages[i - 1]->index + 1;
1332 pagevec_release(&pvec);
1333 } while (index <= end);
1334
1335out:
1336 pagevec_release(&pvec);
1337 return found;
1338}
1339
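/*
 * Illustrative user-space sketch (path and sizes are assumptions) of why the
 * page cache scan above is needed: data written into a preallocated,
 * still-unwritten range may exist only in the page cache until writeback
 * converts the extent, and xfs_find_get_desired_pgoff() is what lets
 * SEEK_DATA and SEEK_HOLE see it.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/xfs/prealloc", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// 1 MiB of unwritten blocks, then extend i_size to cover them.
 *		fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024 * 1024);
 *		ftruncate(fd, 1024 * 1024);
 *		// Dirty a single page; the extent stays unwritten on disk.
 *		pwrite(fd, "x", 1, 256 * 1024);
 *		// Reports the dirtied page, found via the page cache scan.
 *		printf("data at %lld\n", (long long)lseek(fd, 0, SEEK_DATA));
 *		close(fd);
 *		return 0;
 *	}
 */
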
1340/*
1341 * The caller must lock the inode with xfs_ilock_data_map_shared();
1342 * can we craft an appropriate ASSERT?
1343 *
1344 * The end argument exists because the VFS-level lseek interface is defined
1345 * such that any offset past i_size shall return -ENXIO, but we use this for
1346 * the quota code, which does not maintain i_size and wants to SEEK_DATA past it.
1347 */
1348loff_t
1349__xfs_seek_hole_data(
1350 struct inode *inode,
1351 loff_t start,
1352 loff_t end,
1353 int whence)
1354{
1355 struct xfs_inode *ip = XFS_I(inode);
1356 struct xfs_mount *mp = ip->i_mount;
1357 loff_t uninitialized_var(offset);
1358 xfs_fileoff_t fsbno;
1359 xfs_filblks_t lastbno;
1360 int error;
1361
1362 if (start >= end) {
1363 error = -ENXIO;
1364 goto out_error;
1365 }
1366
1367 /*
1368 * Try to read extents from the first block indicated
1369 * by fsbno to the end block of the file.
1370 */
1371 fsbno = XFS_B_TO_FSBT(mp, start);
1372 lastbno = XFS_B_TO_FSB(mp, end);
1373
1374 for (;;) {
1375 struct xfs_bmbt_irec map[2];
1376 int nmap = 2;
1377 unsigned int i;
1378
1379 error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
1380 XFS_BMAPI_ENTIRE);
1381 if (error)
1382 goto out_error;
1383
1384 /* No extents at given offset, must be beyond EOF */
1385 if (nmap == 0) {
1386 error = -ENXIO;
1387 goto out_error;
1388 }
1389
1390 for (i = 0; i < nmap; i++) {
1391 offset = max_t(loff_t, start,
1392 XFS_FSB_TO_B(mp, map[i].br_startoff));
1393
1394 /* Landed in the hole we wanted? */
1395 if (whence == SEEK_HOLE &&
1396 map[i].br_startblock == HOLESTARTBLOCK)
1397 goto out;
1398
1399 /* Landed in the data extent we wanted? */
1400 if (whence == SEEK_DATA &&
1401 (map[i].br_startblock == DELAYSTARTBLOCK ||
1402 (map[i].br_state == XFS_EXT_NORM &&
1403 !isnullstartblock(map[i].br_startblock))))
1404 goto out;
1405
1406 /*
1407 * Landed in an unwritten extent; try to search
1408 * the page cache for a hole or data.
1409 */
1410 if (map[i].br_state == XFS_EXT_UNWRITTEN) {
1411 if (xfs_find_get_desired_pgoff(inode, &map[i],
1412 whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
1413 &offset))
1414 goto out;
1415 }
1416 }
1417
1418 /*
1419 * We only received one extent out of the two requested. This
1420 * means we've hit EOF and didn't find what we are looking for.
1421 */
1422 if (nmap == 1) {
1423 /*
1424 * If we were looking for a hole, set offset to
1425 * the end of the file (i.e., there is an implicit
1426 * hole at the end of any file).
1427 */
1428 if (whence == SEEK_HOLE) {
1429 offset = end;
1430 break;
1431 }
1432 /*
1433 * If we were looking for data, it's nowhere to be found
1434 */
1435 ASSERT(whence == SEEK_DATA);
1436 error = -ENXIO;
1437 goto out_error;
1438 }
1439
1440 ASSERT(i > 1);
1441
1442 /*
1443 * Nothing was found, proceed to the next round of search
1444 * if the next reading offset is not at or beyond EOF.
1445 */
1446 fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
1447 start = XFS_FSB_TO_B(mp, fsbno);
1448 if (start >= end) {
1449 if (whence == SEEK_HOLE) {
1450 offset = end;
1451 break;
1452 }
1453 ASSERT(whence == SEEK_DATA);
1454 error = -ENXIO;
1455 goto out_error;
1456 }
1457 }
1458
1459out:
1460 /*
1461 * If at this point we have found the hole we wanted, the returned
1462 * offset may be bigger than the file size as it may be aligned to a
1463 * page boundary for unwritten extents. Clamp the offset to the end
1464 * of the file to handle that case.
1465 */
1466 if (whence == SEEK_HOLE)
1467 offset = min_t(loff_t, offset, end);
1468
1469 return offset;
1470
1471out_error:
1472 return error;
1473}
1474
1475STATIC loff_t
1476xfs_seek_hole_data(
1477 struct file *file,
1478 loff_t start,
1479 int whence)
1480{
1481 struct inode *inode = file->f_mapping->host;
1482 struct xfs_inode *ip = XFS_I(inode);
1483 struct xfs_mount *mp = ip->i_mount;
1484 uint lock;
1485 loff_t offset, end;
1486 int error = 0;
1487
1488 if (XFS_FORCED_SHUTDOWN(mp))
1489 return -EIO;
1490
1491 lock = xfs_ilock_data_map_shared(ip);
1492
1493 end = i_size_read(inode);
1494 offset = __xfs_seek_hole_data(inode, start, end, whence);
1495 if (offset < 0) {
1496 error = offset;
1497 goto out_unlock;
1498 }
1499
1500 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1501
1502out_unlock:
1503 xfs_iunlock(ip, lock);
1504
1505 if (error)
1506 return error;
1507 return offset;
1508}
1509
1510STATIC loff_t
1511xfs_file_llseek(
1512 struct file *file,
1513 loff_t offset,
1514 int whence)
1515{
1516 switch (whence) {
1517 case SEEK_END:
1518 case SEEK_CUR:
1519 case SEEK_SET:
1520 return generic_file_llseek(file, offset, whence);
1521 case SEEK_HOLE:
1522 case SEEK_DATA:
1523 return xfs_seek_hole_data(file, offset, whence);
1524 default:
1525 return -EINVAL;
1526 }
1527}
1528
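/*
 * Illustrative user-space sketch (path and sizes are assumptions) of the
 * SEEK_HOLE/SEEK_DATA dispatch above on a sparse file. Offsets at or past
 * i_size return -ENXIO, and there is always an implicit hole at EOF.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/xfs/sparse", O_RDWR | O_CREAT | O_TRUNC,
 *			      0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// One byte of data at 1 MiB leaves a hole in front of it.
 *		pwrite(fd, "x", 1, 1024 * 1024);
 *		printf("first data: %lld\n",
 *		       (long long)lseek(fd, 0, SEEK_DATA));
 *		printf("hole after data: %lld\n",
 *		       (long long)lseek(fd, 1024 * 1024, SEEK_HOLE));
 *		close(fd);
 *		return 0;
 *	}
 */
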
1529/*
1530 * Locking for serialisation of IO during page faults. This results in a lock
1531 * ordering of:
1532 *
1533 * mmap_sem (MM)
1534 * sb_start_pagefault(vfs, freeze)
1535 * i_mmaplock (XFS - truncate serialisation)
1536 * page_lock (MM)
1537 * i_lock (XFS - extent map serialisation)
1538 */
1539
1540/*
1541 * An mmap()d file has taken a write protection fault and is being made writable. We
1542 * can set the page state up correctly for a writable page, which means we can
1543 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
1544 * mapping.
1545 */
1546STATIC int
1547xfs_filemap_page_mkwrite(
1548 struct vm_area_struct *vma,
1549 struct vm_fault *vmf)
1550{
1551 struct inode *inode = file_inode(vma->vm_file);
1552 int ret;
1553
1554 trace_xfs_filemap_page_mkwrite(XFS_I(inode));
1555
1556 sb_start_pagefault(inode->i_sb);
1557 file_update_time(vma->vm_file);
1558 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1559
1560 if (IS_DAX(inode)) {
1561 ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
1562 } else {
1563 ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
1564 ret = block_page_mkwrite_return(ret);
1565 }
1566
1567 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1568 sb_end_pagefault(inode->i_sb);
1569
1570 return ret;
1571}
1572
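/*
 * Illustrative user-space sketch (the path is an assumption; the file is
 * assumed to already contain at least one page of data): the first store to
 * a clean page of a shared writable mapping takes a write-protection fault
 * and lands in xfs_filemap_page_mkwrite() above, where delalloc space is
 * reserved and an ENOSPC failure can surface to the application as SIGBUS.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char *p;
 *		int fd = open("/mnt/xfs/mapped", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Write fault: ->page_mkwrite runs before this store completes.
 *		p[0] = 'x';
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */
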
1573STATIC int
1574xfs_filemap_fault(
1575 struct vm_area_struct *vma,
1576 struct vm_fault *vmf)
1577{
1578 struct inode *inode = file_inode(vma->vm_file);
1579 int ret;
1580
1581 trace_xfs_filemap_fault(XFS_I(inode));
1582
1583 /* DAX can shortcut the normal fault path on write faults! */
1584 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1585 return xfs_filemap_page_mkwrite(vma, vmf);
1586
1587 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1588 if (IS_DAX(inode)) {
1589 /*
1590 * we do not want to trigger unwritten extent conversion on read
1591 * faults - that is unnecessary overhead and would also require
1592 * changes to xfs_get_blocks_direct() to map unwritten extent
1593 * ioend for conversion on read-only mappings.
1594 */
1595 ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
1596 } else
1597 ret = filemap_fault(vma, vmf);
1598 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1599
1600 return ret;
1601}
1602
1603/*
1604 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
1605 * both read and write faults, so we need to handle both cases. There is no
1606 * ->pmd_mkwrite callout for huge pages, so we have a single function here
1607 * to handle them both. @flags carries the information on the type of fault
1608 * occurring.
1609 */
1610STATIC int
1611xfs_filemap_pmd_fault(
1612 struct vm_area_struct *vma,
1613 unsigned long addr,
1614 pmd_t *pmd,
1615 unsigned int flags)
1616{
1617 struct inode *inode = file_inode(vma->vm_file);
1618 struct xfs_inode *ip = XFS_I(inode);
1619 int ret;
1620
1621 if (!IS_DAX(inode))
1622 return VM_FAULT_FALLBACK;
1623
1624 trace_xfs_filemap_pmd_fault(ip);
1625
1626 if (flags & FAULT_FLAG_WRITE) {
1627 sb_start_pagefault(inode->i_sb);
1628 file_update_time(vma->vm_file);
1629 }
1630
1631 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1632 ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
1633 NULL);
1634 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1635
1636 if (flags & FAULT_FLAG_WRITE)
1637 sb_end_pagefault(inode->i_sb);
1638
1639 return ret;
1640}
1641
1642/*
1643 * pfn_mkwrite was originally intended to ensure we capture time stamp
1644 * updates on write faults. In reality, it's needed to serialise against
1645 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
1646 * to ensure we serialise the fault barrier in place.
1647 */
1648static int
1649xfs_filemap_pfn_mkwrite(
1650 struct vm_area_struct *vma,
1651 struct vm_fault *vmf)
1652{
1654 struct inode *inode = file_inode(vma->vm_file);
1655 struct xfs_inode *ip = XFS_I(inode);
1656 int ret = VM_FAULT_NOPAGE;
1657 loff_t size;
1658
1659 trace_xfs_filemap_pfn_mkwrite(ip);
1660
1661 sb_start_pagefault(inode->i_sb);
1662 file_update_time(vma->vm_file);
1663
1664 /* check if the faulting page hasn't raced with truncate */
1665 xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
1666 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1667 if (vmf->pgoff >= size)
1668 ret = VM_FAULT_SIGBUS;
1669 else if (IS_DAX(inode))
1670 ret = dax_pfn_mkwrite(vma, vmf);
1671 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
1672 sb_end_pagefault(inode->i_sb);
1673 return ret;
1675}
1676
1677static const struct vm_operations_struct xfs_file_vm_ops = {
1678 .fault = xfs_filemap_fault,
1679 .pmd_fault = xfs_filemap_pmd_fault,
1680 .map_pages = filemap_map_pages,
1681 .page_mkwrite = xfs_filemap_page_mkwrite,
1682 .pfn_mkwrite = xfs_filemap_pfn_mkwrite,
1683};
1684
1685STATIC int
1686xfs_file_mmap(
1687 struct file *filp,
1688 struct vm_area_struct *vma)
1689{
1690 file_accessed(filp);
1691 vma->vm_ops = &xfs_file_vm_ops;
1692 if (IS_DAX(file_inode(filp)))
1693 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1694 return 0;
1695}
1696
1697const struct file_operations xfs_file_operations = {
1698 .llseek = xfs_file_llseek,
1699 .read_iter = xfs_file_read_iter,
1700 .write_iter = xfs_file_write_iter,
1701 .splice_read = xfs_file_splice_read,
1702 .splice_write = iter_file_splice_write,
1703 .unlocked_ioctl = xfs_file_ioctl,
1704#ifdef CONFIG_COMPAT
1705 .compat_ioctl = xfs_file_compat_ioctl,
1706#endif
1707 .mmap = xfs_file_mmap,
1708 .open = xfs_file_open,
1709 .release = xfs_file_release,
1710 .fsync = xfs_file_fsync,
1711 .fallocate = xfs_file_fallocate,
1712};
1713
1714const struct file_operations xfs_dir_file_operations = {
1715 .open = xfs_dir_open,
1716 .read = generic_read_dir,
1717 .iterate = xfs_file_readdir,
1718 .llseek = generic_file_llseek,
1719 .unlocked_ioctl = xfs_file_ioctl,
1720#ifdef CONFIG_COMPAT
1721 .compat_ioctl = xfs_file_compat_ioctl,
1722#endif
1723 .fsync = xfs_dir_fsync,
1724};