/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

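/*
 * Set or clear the preallocation flag on an inode via a logged
 * transaction.  Unless the caller passes XFS_PREALLOC_INVISIBLE, this
 * also strips the setuid/setgid bits and bumps the timestamps, mirroring
 * what a regular write to the file would do.
 */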
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush and thus no need for explicit cache
 * flush operations.  There are also no non-transactional metadata updates
 * on directories.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

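/*
 * Flush all dirty data and metadata for a regular file to stable storage:
 * write back and wait on the page cache, flush the data device cache if
 * the log lives on a separate device, then force the log up to the last
 * LSN that touched the inode.
 */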
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before we log the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was a
	 * no-op, we might have to flush the data device cache here.  This
	 * can only happen for fdatasync/O_DSYNC if we were overwriting an
	 * already allocated file and thus do not have any metadata to commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

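/*
 * Direct I/O reads take the IO lock shared, so they can run concurrently
 * with buffered readers and other direct I/O; iomap_dio_rw() does the
 * actual block mapping and bio submission.
 */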
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

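/*
 * DAX reads copy directly from the persistent memory backing the file.
 * IOCB_NOWAIT callers must not block on the IO lock, so only a trylock
 * is attempted for them.
 */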
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

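/*
 * Buffered reads go through the generic page cache path; we only wrap it
 * with the shared IO lock to serialise against truncate and other layout
 * changes.
 */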
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

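/*
 * Top-level ->read_iter method: dispatch to the DAX, direct or buffered
 * read path and account the bytes read against the filesystem stats.
 */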
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * held exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all the checks made so far.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

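/*
 * Completion handler for direct writes, called from the iomap dio code
 * once the data has hit the disk: finish any copy-on-write remapping,
 * convert unwritten extents, and for extending writes move the on-disk
 * file size forward under the i_flags_lock.
 */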
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

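/*
 * DAX writes bypass the page cache and copy straight to the backing
 * store.  We always take the IO lock exclusive here; extending writes
 * update both the in-core and on-disk inode sizes on the way out.
 */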
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

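/*
 * Buffered writes take the IO lock exclusive.  On EDQUOT or ENOSPC we
 * try to reclaim preallocated and COW blocks (and, for ENOSPC, flush
 * dirty inodes) and then retry the write once.
 */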
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

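/*
 * Top-level ->write_iter method: dispatch to the DAX, direct or buffered
 * write path, falling back from direct to buffered only for the
 * unaligned-write-to-reflink-file case, then handle O_(D)SYNC semantics.
 */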
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

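/*
 * Implement the fallocate(2) modes listed in XFS_FALLOC_FL_SUPPORTED:
 * preallocation, hole punching, range zeroing, unsharing, and collapsing
 * or inserting ranges.  All of them run with both the IO lock and the
 * mmap lock held exclusive so page faults cannot race with the layout
 * change.  For example, from userspace (illustrative only):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * ends up in the xfs_free_file_space() branch below.
 */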
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been updated,
	 * so that if we crash during the operation we don't leave shifted
	 * extents past EOF and hence lose access to the data that is
	 * contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

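/*
 * ->clone_file_range and ->dedupe_file_range are both thin wrappers
 * around xfs_reflink_remap_range(); dedupe additionally caps the request
 * so a single call never does more than MAX_RW_COUNT of IO.
 */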
STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	struct inode	*srci = file_inode(src_file);
	u64		max_dedupe;
	int		error;

	/*
	 * Since we have to read all these pages in to compare them, cut
	 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
	 * That means we won't do more than MAX_RW_COUNT IO per request.
	 */
	max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
	if (len > max_dedupe)
		len = max_dedupe;
	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

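/*
 * Opening a file only needs a few sanity checks; we also advertise
 * FMODE_NOWAIT so the IOCB_NOWAIT paths above can be used.
 */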
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

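/*
 * SEEK_HOLE/SEEK_DATA are answered from the extent map via the iomap
 * seek helpers; everything else is handled generically.
 */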
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode	*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static int
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

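/*
 * Set up a mapping backed by the fault handlers above.  MAP_SYNC is only
 * honoured for DAX files, where the fault path (see VM_FAULT_NEEDDSYNC
 * above) can make the mapping persistent without a later fsync.
 */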
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_trans.h"
26#include "xfs_mount.h"
27#include "xfs_bmap_btree.h"
28#include "xfs_alloc.h"
29#include "xfs_dinode.h"
30#include "xfs_inode.h"
31#include "xfs_inode_item.h"
32#include "xfs_bmap.h"
33#include "xfs_error.h"
34#include "xfs_vnodeops.h"
35#include "xfs_da_btree.h"
36#include "xfs_ioctl.h"
37#include "xfs_trace.h"
38
39#include <linux/dcache.h>
40#include <linux/falloc.h>
41
42static const struct vm_operations_struct xfs_file_vm_ops;
43
44/*
45 * Locking primitives for read and write IO paths to ensure we consistently use
46 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
47 */
48static inline void
49xfs_rw_ilock(
50 struct xfs_inode *ip,
51 int type)
52{
53 if (type & XFS_IOLOCK_EXCL)
54 mutex_lock(&VFS_I(ip)->i_mutex);
55 xfs_ilock(ip, type);
56}
57
58static inline void
59xfs_rw_iunlock(
60 struct xfs_inode *ip,
61 int type)
62{
63 xfs_iunlock(ip, type);
64 if (type & XFS_IOLOCK_EXCL)
65 mutex_unlock(&VFS_I(ip)->i_mutex);
66}
67
68static inline void
69xfs_rw_ilock_demote(
70 struct xfs_inode *ip,
71 int type)
72{
73 xfs_ilock_demote(ip, type);
74 if (type & XFS_IOLOCK_EXCL)
75 mutex_unlock(&VFS_I(ip)->i_mutex);
76}
77
78/*
79 * xfs_iozero
80 *
81 * xfs_iozero clears the specified range of buffer supplied,
82 * and marks all the affected blocks as valid and modified. If
83 * an affected block is not allocated, it will be allocated. If
84 * an affected block is not completely overwritten, and is not
85 * valid before the operation, it will be read from disk before
86 * being partially zeroed.
87 */
88STATIC int
89xfs_iozero(
90 struct xfs_inode *ip, /* inode */
91 loff_t pos, /* offset in file */
92 size_t count) /* size of data to zero */
93{
94 struct page *page;
95 struct address_space *mapping;
96 int status;
97
98 mapping = VFS_I(ip)->i_mapping;
99 do {
100 unsigned offset, bytes;
101 void *fsdata;
102
103 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
104 bytes = PAGE_CACHE_SIZE - offset;
105 if (bytes > count)
106 bytes = count;
107
108 status = pagecache_write_begin(NULL, mapping, pos, bytes,
109 AOP_FLAG_UNINTERRUPTIBLE,
110 &page, &fsdata);
111 if (status)
112 break;
113
114 zero_user(page, offset, bytes);
115
116 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
117 page, fsdata);
118 WARN_ON(status <= 0); /* can't return less than zero! */
119 pos += bytes;
120 count -= bytes;
121 status = 0;
122 } while (count);
123
124 return (-status);
125}
126
127STATIC int
128xfs_file_fsync(
129 struct file *file,
130 loff_t start,
131 loff_t end,
132 int datasync)
133{
134 struct inode *inode = file->f_mapping->host;
135 struct xfs_inode *ip = XFS_I(inode);
136 struct xfs_mount *mp = ip->i_mount;
137 struct xfs_trans *tp;
138 int error = 0;
139 int log_flushed = 0;
140
141 trace_xfs_file_fsync(ip);
142
143 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
144 if (error)
145 return error;
146
147 if (XFS_FORCED_SHUTDOWN(mp))
148 return -XFS_ERROR(EIO);
149
150 xfs_iflags_clear(ip, XFS_ITRUNCATED);
151
152 xfs_ilock(ip, XFS_IOLOCK_SHARED);
153 xfs_ioend_wait(ip);
154 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
155
156 if (mp->m_flags & XFS_MOUNT_BARRIER) {
157 /*
158 * If we have an RT and/or log subvolume we need to make sure
159 * to flush the write cache the device used for file data
160 * first. This is to ensure newly written file data make
161 * it to disk before logging the new inode size in case of
162 * an extending write.
163 */
164 if (XFS_IS_REALTIME_INODE(ip))
165 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
166 else if (mp->m_logdev_targp != mp->m_ddev_targp)
167 xfs_blkdev_issue_flush(mp->m_ddev_targp);
168 }
169
170 /*
171 * We always need to make sure that the required inode state is safe on
172 * disk. The inode might be clean but we still might need to force the
173 * log because of committed transactions that haven't hit the disk yet.
174 * Likewise, there could be unflushed non-transactional changes to the
175 * inode core that have to go to disk and this requires us to issue
176 * a synchronous transaction to capture these changes correctly.
177 *
178 * This code relies on the assumption that if the i_update_core field
179 * of the inode is clear and the inode is unpinned then it is clean
180 * and no action is required.
181 */
182 xfs_ilock(ip, XFS_ILOCK_SHARED);
183
184 /*
185 * First check if the VFS inode is marked dirty. All the dirtying
186 * of non-transactional updates no goes through mark_inode_dirty*,
187 * which allows us to distinguish beteeen pure timestamp updates
188 * and i_size updates which need to be caught for fdatasync.
189 * After that also theck for the dirty state in the XFS inode, which
190 * might gets cleared when the inode gets written out via the AIL
191 * or xfs_iflush_cluster.
192 */
193 if (((inode->i_state & I_DIRTY_DATASYNC) ||
194 ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
195 ip->i_update_core) {
196 /*
197 * Kick off a transaction to log the inode core to get the
198 * updates. The sync transaction will also force the log.
199 */
200 xfs_iunlock(ip, XFS_ILOCK_SHARED);
201 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
202 error = xfs_trans_reserve(tp, 0,
203 XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
204 if (error) {
205 xfs_trans_cancel(tp, 0);
206 return -error;
207 }
208 xfs_ilock(ip, XFS_ILOCK_EXCL);
209
210 /*
211 * Note - it's possible that we might have pushed ourselves out
212 * of the way during trans_reserve which would flush the inode.
213 * But there's no guarantee that the inode buffer has actually
214 * gone out yet (it's delwri). Plus the buffer could be pinned
215 * anyway if it's part of an inode in another recent
216 * transaction. So we play it safe and fire off the
217 * transaction anyway.
218 */
219 xfs_trans_ijoin(tp, ip);
220 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
221 xfs_trans_set_sync(tp);
222 error = _xfs_trans_commit(tp, 0, &log_flushed);
223
224 xfs_iunlock(ip, XFS_ILOCK_EXCL);
225 } else {
226 /*
227 * Timestamps/size haven't changed since last inode flush or
228 * inode transaction commit. That means either nothing got
229 * written or a transaction committed which caught the updates.
230 * If the latter happened and the transaction hasn't hit the
231 * disk yet, the inode will be still be pinned. If it is,
232 * force the log.
233 */
234 if (xfs_ipincount(ip)) {
235 error = _xfs_log_force_lsn(mp,
236 ip->i_itemp->ili_last_lsn,
237 XFS_LOG_SYNC, &log_flushed);
238 }
239 xfs_iunlock(ip, XFS_ILOCK_SHARED);
240 }
241
242 /*
243 * If we only have a single device, and the log force about was
244 * a no-op we might have to flush the data device cache here.
245 * This can only happen for fdatasync/O_DSYNC if we were overwriting
246 * an already allocated file and thus do not have any metadata to
247 * commit.
248 */
249 if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
250 mp->m_logdev_targp == mp->m_ddev_targp &&
251 !XFS_IS_REALTIME_INODE(ip) &&
252 !log_flushed)
253 xfs_blkdev_issue_flush(mp->m_ddev_targp);
254
255 return -error;
256}
257
258STATIC ssize_t
259xfs_file_aio_read(
260 struct kiocb *iocb,
261 const struct iovec *iovp,
262 unsigned long nr_segs,
263 loff_t pos)
264{
265 struct file *file = iocb->ki_filp;
266 struct inode *inode = file->f_mapping->host;
267 struct xfs_inode *ip = XFS_I(inode);
268 struct xfs_mount *mp = ip->i_mount;
269 size_t size = 0;
270 ssize_t ret = 0;
271 int ioflags = 0;
272 xfs_fsize_t n;
273 unsigned long seg;
274
275 XFS_STATS_INC(xs_read_calls);
276
277 BUG_ON(iocb->ki_pos != pos);
278
279 if (unlikely(file->f_flags & O_DIRECT))
280 ioflags |= IO_ISDIRECT;
281 if (file->f_mode & FMODE_NOCMTIME)
282 ioflags |= IO_INVIS;
283
284 /* START copy & waste from filemap.c */
285 for (seg = 0; seg < nr_segs; seg++) {
286 const struct iovec *iv = &iovp[seg];
287
288 /*
289 * If any segment has a negative length, or the cumulative
290 * length ever wraps negative then return -EINVAL.
291 */
292 size += iv->iov_len;
293 if (unlikely((ssize_t)(size|iv->iov_len) < 0))
294 return XFS_ERROR(-EINVAL);
295 }
296 /* END copy & waste from filemap.c */
297
298 if (unlikely(ioflags & IO_ISDIRECT)) {
299 xfs_buftarg_t *target =
300 XFS_IS_REALTIME_INODE(ip) ?
301 mp->m_rtdev_targp : mp->m_ddev_targp;
302 if ((iocb->ki_pos & target->bt_smask) ||
303 (size & target->bt_smask)) {
304 if (iocb->ki_pos == ip->i_size)
305 return 0;
306 return -XFS_ERROR(EINVAL);
307 }
308 }
309
310 n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
311 if (n <= 0 || size == 0)
312 return 0;
313
314 if (n < size)
315 size = n;
316
317 if (XFS_FORCED_SHUTDOWN(mp))
318 return -EIO;
319
320 if (unlikely(ioflags & IO_ISDIRECT)) {
321 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
322
323 if (inode->i_mapping->nrpages) {
324 ret = -xfs_flushinval_pages(ip,
325 (iocb->ki_pos & PAGE_CACHE_MASK),
326 -1, FI_REMAPF_LOCKED);
327 if (ret) {
328 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
329 return ret;
330 }
331 }
332 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
333 } else
334 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
335
336 trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
337
338 ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
339 if (ret > 0)
340 XFS_STATS_ADD(xs_read_bytes, ret);
341
342 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
343 return ret;
344}
345
346STATIC ssize_t
347xfs_file_splice_read(
348 struct file *infilp,
349 loff_t *ppos,
350 struct pipe_inode_info *pipe,
351 size_t count,
352 unsigned int flags)
353{
354 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
355 int ioflags = 0;
356 ssize_t ret;
357
358 XFS_STATS_INC(xs_read_calls);
359
360 if (infilp->f_mode & FMODE_NOCMTIME)
361 ioflags |= IO_INVIS;
362
363 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
364 return -EIO;
365
366 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
367
368 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
369
370 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
371 if (ret > 0)
372 XFS_STATS_ADD(xs_read_bytes, ret);
373
374 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
375 return ret;
376}
377
378STATIC void
379xfs_aio_write_isize_update(
380 struct inode *inode,
381 loff_t *ppos,
382 ssize_t bytes_written)
383{
384 struct xfs_inode *ip = XFS_I(inode);
385 xfs_fsize_t isize = i_size_read(inode);
386
387 if (bytes_written > 0)
388 XFS_STATS_ADD(xs_write_bytes, bytes_written);
389
390 if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
391 *ppos > isize))
392 *ppos = isize;
393
394 if (*ppos > ip->i_size) {
395 xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
396 if (*ppos > ip->i_size)
397 ip->i_size = *ppos;
398 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
399 }
400}
401
402/*
403 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
404 * part of the I/O may have been written to disk before the error occurred. In
405 * this case the on-disk file size may have been adjusted beyond the in-memory
406 * file size and now needs to be truncated back.
407 */
408STATIC void
409xfs_aio_write_newsize_update(
410 struct xfs_inode *ip)
411{
412 if (ip->i_new_size) {
413 xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
414 ip->i_new_size = 0;
415 if (ip->i_d.di_size > ip->i_size)
416 ip->i_d.di_size = ip->i_size;
417 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
418 }
419}
420
421/*
422 * xfs_file_splice_write() does not use xfs_rw_ilock() because
423 * generic_file_splice_write() takes the i_mutex itself. This, in theory,
424 * couuld cause lock inversions between the aio_write path and the splice path
425 * if someone is doing concurrent splice(2) based writes and write(2) based
426 * writes to the same inode. The only real way to fix this is to re-implement
427 * the generic code here with correct locking orders.
428 */
429STATIC ssize_t
430xfs_file_splice_write(
431 struct pipe_inode_info *pipe,
432 struct file *outfilp,
433 loff_t *ppos,
434 size_t count,
435 unsigned int flags)
436{
437 struct inode *inode = outfilp->f_mapping->host;
438 struct xfs_inode *ip = XFS_I(inode);
439 xfs_fsize_t new_size;
440 int ioflags = 0;
441 ssize_t ret;
442
443 XFS_STATS_INC(xs_write_calls);
444
445 if (outfilp->f_mode & FMODE_NOCMTIME)
446 ioflags |= IO_INVIS;
447
448 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
449 return -EIO;
450
451 xfs_ilock(ip, XFS_IOLOCK_EXCL);
452
453 new_size = *ppos + count;
454
455 xfs_ilock(ip, XFS_ILOCK_EXCL);
456 if (new_size > ip->i_size)
457 ip->i_new_size = new_size;
458 xfs_iunlock(ip, XFS_ILOCK_EXCL);
459
460 trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
461
462 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
463
464 xfs_aio_write_isize_update(inode, ppos, ret);
465 xfs_aio_write_newsize_update(ip);
466 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
467 return ret;
468}
469
470/*
471 * This routine is called to handle zeroing any space in the last
472 * block of the file that is beyond the EOF. We do this since the
473 * size is being increased without writing anything to that block
474 * and we don't want anyone to read the garbage on the disk.
475 */
476STATIC int /* error (positive) */
477xfs_zero_last_block(
478 xfs_inode_t *ip,
479 xfs_fsize_t offset,
480 xfs_fsize_t isize)
481{
482 xfs_fileoff_t last_fsb;
483 xfs_mount_t *mp = ip->i_mount;
484 int nimaps;
485 int zero_offset;
486 int zero_len;
487 int error = 0;
488 xfs_bmbt_irec_t imap;
489
490 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
491
492 zero_offset = XFS_B_FSB_OFFSET(mp, isize);
493 if (zero_offset == 0) {
494 /*
495 * There are no extra bytes in the last block on disk to
496 * zero, so return.
497 */
498 return 0;
499 }
500
501 last_fsb = XFS_B_TO_FSBT(mp, isize);
502 nimaps = 1;
503 error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
504 &nimaps, NULL);
505 if (error) {
506 return error;
507 }
508 ASSERT(nimaps > 0);
509 /*
510 * If the block underlying isize is just a hole, then there
511 * is nothing to zero.
512 */
513 if (imap.br_startblock == HOLESTARTBLOCK) {
514 return 0;
515 }
516 /*
517 * Zero the part of the last block beyond the EOF, and write it
518 * out sync. We need to drop the ilock while we do this so we
519 * don't deadlock when the buffer cache calls back to us.
520 */
521 xfs_iunlock(ip, XFS_ILOCK_EXCL);
522
523 zero_len = mp->m_sb.sb_blocksize - zero_offset;
524 if (isize + zero_len > offset)
525 zero_len = offset - isize;
526 error = xfs_iozero(ip, isize, zero_len);
527
528 xfs_ilock(ip, XFS_ILOCK_EXCL);
529 ASSERT(error >= 0);
530 return error;
531}
532
533/*
534 * Zero any on disk space between the current EOF and the new,
535 * larger EOF. This handles the normal case of zeroing the remainder
536 * of the last block in the file and the unusual case of zeroing blocks
537 * out beyond the size of the file. This second case only happens
538 * with fixed size extents and when the system crashes before the inode
539 * size was updated but after blocks were allocated. If fill is set,
540 * then any holes in the range are filled and zeroed. If not, the holes
541 * are left alone as holes.
542 */
543
544int /* error (positive) */
545xfs_zero_eof(
546 xfs_inode_t *ip,
547 xfs_off_t offset, /* starting I/O offset */
548 xfs_fsize_t isize) /* current inode size */
549{
550 xfs_mount_t *mp = ip->i_mount;
551 xfs_fileoff_t start_zero_fsb;
552 xfs_fileoff_t end_zero_fsb;
553 xfs_fileoff_t zero_count_fsb;
554 xfs_fileoff_t last_fsb;
555 xfs_fileoff_t zero_off;
556 xfs_fsize_t zero_len;
557 int nimaps;
558 int error = 0;
559 xfs_bmbt_irec_t imap;
560
561 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
562 ASSERT(offset > isize);
563
564 /*
565 * First handle zeroing the block on which isize resides.
566 * We only zero a part of that block so it is handled specially.
567 */
568 error = xfs_zero_last_block(ip, offset, isize);
569 if (error) {
570 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
571 return error;
572 }
573
574 /*
575 * Calculate the range between the new size and the old
576 * where blocks needing to be zeroed may exist. To get the
577 * block where the last byte in the file currently resides,
578 * we need to subtract one from the size and truncate back
579 * to a block boundary. We subtract 1 in case the size is
580 * exactly on a block boundary.
581 */
582 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
583 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
584 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
585 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
586 if (last_fsb == end_zero_fsb) {
587 /*
588 * The size was only incremented on its last block.
589 * We took care of that above, so just return.
590 */
591 return 0;
592 }
593
594 ASSERT(start_zero_fsb <= end_zero_fsb);
595 while (start_zero_fsb <= end_zero_fsb) {
596 nimaps = 1;
597 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
598 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
599 0, NULL, 0, &imap, &nimaps, NULL);
600 if (error) {
601 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
602 return error;
603 }
604 ASSERT(nimaps > 0);
605
606 if (imap.br_state == XFS_EXT_UNWRITTEN ||
607 imap.br_startblock == HOLESTARTBLOCK) {
608 /*
609 * This loop handles initializing pages that were
610 * partially initialized by the code below this
611 * loop. It basically zeroes the part of the page
612 * that sits on a hole and sets the page as P_HOLE
613 * and calls remapf if it is a mapped file.
614 */
615 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
616 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
617 continue;
618 }
619
620 /*
621 * There are blocks we need to zero.
622 * Drop the inode lock while we're doing the I/O.
623 * We'll still have the iolock to protect us.
624 */
625 xfs_iunlock(ip, XFS_ILOCK_EXCL);
626
627 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
628 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
629
630 if ((zero_off + zero_len) > offset)
631 zero_len = offset - zero_off;
632
633 error = xfs_iozero(ip, zero_off, zero_len);
634 if (error) {
635 goto out_lock;
636 }
637
638 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
639 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
640
641 xfs_ilock(ip, XFS_ILOCK_EXCL);
642 }
643
644 return 0;
645
646out_lock:
647 xfs_ilock(ip, XFS_ILOCK_EXCL);
648 ASSERT(error >= 0);
649 return error;
650}
651
652/*
653 * Common pre-write limit and setup checks.
654 *
655 * Returns with iolock held according to @iolock.
656 */
657STATIC ssize_t
658xfs_file_aio_write_checks(
659 struct file *file,
660 loff_t *pos,
661 size_t *count,
662 int *iolock)
663{
664 struct inode *inode = file->f_mapping->host;
665 struct xfs_inode *ip = XFS_I(inode);
666 xfs_fsize_t new_size;
667 int error = 0;
668
669 error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
670 if (error) {
671 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
672 *iolock = 0;
673 return error;
674 }
675
676 new_size = *pos + *count;
677 if (new_size > ip->i_size)
678 ip->i_new_size = new_size;
679
680 if (likely(!(file->f_mode & FMODE_NOCMTIME)))
681 file_update_time(file);
682
683 /*
684 * If the offset is beyond the size of the file, we need to zero any
685 * blocks that fall between the existing EOF and the start of this
686 * write.
687 */
688 if (*pos > ip->i_size)
689 error = -xfs_zero_eof(ip, *pos, ip->i_size);
690
691 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
692 if (error)
693 return error;
694
695 /*
696 * If we're writing the file then make sure to clear the setuid and
697 * setgid bits if the process is not being run by root. This keeps
698 * people from modifying setuid and setgid binaries.
699 */
700 return file_remove_suid(file);
701
702}
703
704/*
705 * xfs_file_dio_aio_write - handle direct IO writes
706 *
707 * Lock the inode appropriately to prepare for and issue a direct IO write.
708 * By separating it from the buffered write path we remove all the tricky to
709 * follow locking changes and looping.
710 *
711 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
712 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
713 * pages are flushed out.
714 *
715 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
716 * allowing them to be done in parallel with reads and other direct IO writes.
717 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
718 * needs to do sub-block zeroing and that requires serialisation against other
719 * direct IOs to the same block. In this case we need to serialise the
720 * submission of the unaligned IOs so that we don't get racing block zeroing in
721 * the dio layer. To avoid the problem with aio, we also need to wait for
722 * outstanding IOs to complete so that unwritten extent conversion is completed
723 * before we try to map the overlapping block. This is currently implemented by
724 * hitting it with a big hammer (i.e. xfs_ioend_wait()).
725 *
726 * Returns with locks held indicated by @iolock and errors indicated by
727 * negative return values.
728 */
729STATIC ssize_t
730xfs_file_dio_aio_write(
731 struct kiocb *iocb,
732 const struct iovec *iovp,
733 unsigned long nr_segs,
734 loff_t pos,
735 size_t ocount,
736 int *iolock)
737{
738 struct file *file = iocb->ki_filp;
739 struct address_space *mapping = file->f_mapping;
740 struct inode *inode = mapping->host;
741 struct xfs_inode *ip = XFS_I(inode);
742 struct xfs_mount *mp = ip->i_mount;
743 ssize_t ret = 0;
744 size_t count = ocount;
745 int unaligned_io = 0;
746 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
747 mp->m_rtdev_targp : mp->m_ddev_targp;
748
749 *iolock = 0;
750 if ((pos & target->bt_smask) || (count & target->bt_smask))
751 return -XFS_ERROR(EINVAL);
752
753 if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
754 unaligned_io = 1;
755
756 if (unaligned_io || mapping->nrpages || pos > ip->i_size)
757 *iolock = XFS_IOLOCK_EXCL;
758 else
759 *iolock = XFS_IOLOCK_SHARED;
760 xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
761
762 ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
763 if (ret)
764 return ret;
765
766 if (mapping->nrpages) {
767 WARN_ON(*iolock != XFS_IOLOCK_EXCL);
768 ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
769 FI_REMAPF_LOCKED);
770 if (ret)
771 return ret;
772 }
773
774 /*
775 * If we are doing unaligned IO, wait for all other IO to drain,
776 * otherwise demote the lock if we had to flush cached pages
777 */
778 if (unaligned_io)
779 xfs_ioend_wait(ip);
780 else if (*iolock == XFS_IOLOCK_EXCL) {
781 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
782 *iolock = XFS_IOLOCK_SHARED;
783 }
784
785 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
786 ret = generic_file_direct_write(iocb, iovp,
787 &nr_segs, pos, &iocb->ki_pos, count, ocount);
788
789 /* No fallback to buffered IO on errors for XFS. */
790 ASSERT(ret < 0 || ret == count);
791 return ret;
792}
793
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	size_t			count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error;

		xfs_rw_iunlock(ip, iolock);
		error = xfs_file_fsync(file, pos, end,
				(file->f_flags & __O_SYNC) ? 0 : 1);
		xfs_rw_ilock(ip, iolock);
		if (error)
			ret = error;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

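/*
 * Illustrative userspace sketch (not part of this file): opening with
 * O_DSYNC makes every successful write() take the fsync path above, so
 * the data and the metadata needed to reach it are durable by the time
 * write() returns.  The file name is an assumption; error handling is
 * omitted.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int	fd = open("journal",
 *				  O_WRONLY | O_CREAT | O_DSYNC, 0644);
 *
 *		write(fd, "record\n", 7);
 *		close(fd);
 *		return 0;
 *	}
 */
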
STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* Check that the new inode size is valid before allocating. */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change the file size if needed. */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
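
/*
 * Illustrative userspace sketch (not part of this file) of the two
 * modes handled above: space preallocation that leaves the file size
 * alone, and hole punching.  Offsets, lengths and the file name are
 * assumptions; error handling is omitted.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int	fd = open("datafile", O_RDWR | O_CREAT, 0644);
 *
 *		fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  0, 64 << 10);
 *		close(fd);
 *		return 0;
 *	}
 *
 * The first call maps to XFS_IOC_RESVSP, the second to
 * XFS_IOC_UNRESVSP; in both cases i_size is left untouched because
 * FALLOC_FL_KEEP_SIZE is set.
 */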

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct operation, but the XFS dir2 leaf code wants an
	 * estimate of the buffer size to calculate its readahead window and
	 * size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

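/*
 * Illustrative userspace sketch (not part of this file): a plain
 * readdir() loop is the common consumer of this path, and the 32768
 * byte figure above reflects the buffer glibc uses behind such a loop.
 * The directory name is an assumption; error handling is omitted.
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		DIR		*dir = opendir("somedir");
 *		struct dirent	*d;
 *
 *		while ((d = readdir(dir)) != NULL)
 *			printf("%s\n", d->d_name);
 *		closedir(dir);
 *		return 0;
 *	}
 */
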
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

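/*
 * Illustrative userspace sketch (not part of this file): the first
 * store to a MAP_SHARED mapping of a file takes the write protection
 * fault handled above, which is where delalloc reservation and ENOSPC
 * checking happen.  The file name and length are assumptions; error
 * handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int	fd = open("datafile", O_RDWR);
 *		char	*p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, 0);
 *
 *		p[0] = 'x';
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */
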
const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};