/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode *ip,
	int type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode *ip,
	int type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode *ip,
	int type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode *ip,	/* inode */
	loff_t pos,		/* offset in file */
	size_t count)		/* size of data to zero */
{
	struct page *page;
	struct address_space *mapping;
	int status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

STATIC int
xfs_file_fsync(
	struct file *file,
	loff_t start,
	loff_t end,
	int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_trans *tp;
	int error = 0;
	int log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_ioend_wait(ip);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(mp,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb *iocb,
	const struct iovec *iovp,
	unsigned long nr_segs,
	loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	size_t size = 0;
	ssize_t ret = 0;
	int ioflags = 0;
	xfs_fsize_t n;
	unsigned long seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t *target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	} else
		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file *infilp,
	loff_t *ppos,
	struct pipe_inode_info *pipe,
	size_t count,
	unsigned int flags)
{
	struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
	int ioflags = 0;
	ssize_t ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

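/*
 * Update the in-core XFS inode size after a write.  We account the bytes
 * written, wind the file position back to the VFS inode size if the write
 * failed part way through (except for EFAULT), and push ip->i_size forwards
 * under the ilock if the write extended the file.
 */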
STATIC void
xfs_aio_write_isize_update(
	struct inode *inode,
	loff_t *ppos,
	ssize_t bytes_written)
{
	struct xfs_inode *ip = XFS_I(inode);
	xfs_fsize_t isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
		     *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}
401
402/*
403 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
404 * part of the I/O may have been written to disk before the error occurred. In
405 * this case the on-disk file size may have been adjusted beyond the in-memory
406 * file size and now needs to be truncated back.
407 */
408STATIC void
409xfs_aio_write_newsize_update(
410 struct xfs_inode *ip)
411{
412 if (ip->i_new_size) {
413 xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
414 ip->i_new_size = 0;
415 if (ip->i_d.di_size > ip->i_size)
416 ip->i_d.di_size = ip->i_size;
417 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
418 }
419}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info *pipe,
	struct file *outfilp,
	loff_t *ppos,
	size_t count,
	unsigned int flags)
{
	struct inode *inode = outfilp->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	xfs_fsize_t new_size;
	int ioflags = 0;
	ssize_t ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);
	xfs_aio_write_newsize_update(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t *ip,
	xfs_fsize_t offset,
	xfs_fsize_t isize)
{
	xfs_fileoff_t last_fsb;
	xfs_mount_t *mp = ip->i_mount;
	int nimaps;
	int zero_offset;
	int zero_len;
	int error = 0;
	xfs_bmbt_irec_t imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
532
533/*
534 * Zero any on disk space between the current EOF and the new,
535 * larger EOF. This handles the normal case of zeroing the remainder
536 * of the last block in the file and the unusual case of zeroing blocks
537 * out beyond the size of the file. This second case only happens
538 * with fixed size extents and when the system crashes before the inode
539 * size was updated but after blocks were allocated. If fill is set,
540 * then any holes in the range are filled and zeroed. If not, the holes
541 * are left alone as holes.
542 */
543
544int /* error (positive) */
545xfs_zero_eof(
546 xfs_inode_t *ip,
547 xfs_off_t offset, /* starting I/O offset */
548 xfs_fsize_t isize) /* current inode size */
549{
550 xfs_mount_t *mp = ip->i_mount;
551 xfs_fileoff_t start_zero_fsb;
552 xfs_fileoff_t end_zero_fsb;
553 xfs_fileoff_t zero_count_fsb;
554 xfs_fileoff_t last_fsb;
555 xfs_fileoff_t zero_off;
556 xfs_fsize_t zero_len;
557 int nimaps;
558 int error = 0;
559 xfs_bmbt_irec_t imap;
560
561 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
562 ASSERT(offset > isize);
563
564 /*
565 * First handle zeroing the block on which isize resides.
566 * We only zero a part of that block so it is handled specially.
567 */
568 error = xfs_zero_last_block(ip, offset, isize);
569 if (error) {
570 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
571 return error;
572 }
573
574 /*
575 * Calculate the range between the new size and the old
576 * where blocks needing to be zeroed may exist. To get the
577 * block where the last byte in the file currently resides,
578 * we need to subtract one from the size and truncate back
579 * to a block boundary. We subtract 1 in case the size is
580 * exactly on a block boundary.
581 */
582 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
583 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
584 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
585 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
586 if (last_fsb == end_zero_fsb) {
587 /*
588 * The size was only incremented on its last block.
589 * We took care of that above, so just return.
590 */
591 return 0;
592 }
593
594 ASSERT(start_zero_fsb <= end_zero_fsb);
595 while (start_zero_fsb <= end_zero_fsb) {
596 nimaps = 1;
597 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
598 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
599 0, NULL, 0, &imap, &nimaps, NULL);
600 if (error) {
601 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
602 return error;
603 }
604 ASSERT(nimaps > 0);
605
606 if (imap.br_state == XFS_EXT_UNWRITTEN ||
607 imap.br_startblock == HOLESTARTBLOCK) {
608 /*
609 * This loop handles initializing pages that were
610 * partially initialized by the code below this
611 * loop. It basically zeroes the part of the page
612 * that sits on a hole and sets the page as P_HOLE
613 * and calls remapf if it is a mapped file.
614 */
615 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
616 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
617 continue;
618 }
619
620 /*
621 * There are blocks we need to zero.
622 * Drop the inode lock while we're doing the I/O.
623 * We'll still have the iolock to protect us.
624 */
625 xfs_iunlock(ip, XFS_ILOCK_EXCL);
626
627 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
628 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
629
630 if ((zero_off + zero_len) > offset)
631 zero_len = offset - zero_off;
632
633 error = xfs_iozero(ip, zero_off, zero_len);
634 if (error) {
635 goto out_lock;
636 }
637
638 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
639 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
640
641 xfs_ilock(ip, XFS_ILOCK_EXCL);
642 }
643
644 return 0;
645
646out_lock:
647 xfs_ilock(ip, XFS_ILOCK_EXCL);
648 ASSERT(error >= 0);
649 return error;
650}

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file *file,
	loff_t *pos,
	size_t *count,
	int *iolock)
{
	struct inode *inode = file->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	xfs_fsize_t new_size;
	int error = 0;

	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	new_size = *pos + *count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 */
	if (*pos > ip->i_size)
		error = -xfs_zero_eof(ip, *pos, ip->i_size);

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb *iocb,
	const struct iovec *iovp,
	unsigned long nr_segs,
	loff_t pos,
	size_t ocount,
	int *iolock)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	ssize_t ret = 0;
	size_t count = ocount;
	int unaligned_io = 0;
	struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	if (mapping->nrpages) {
		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
						FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb *iocb,
	const struct iovec *iovp,
	unsigned long nr_segs,
	loff_t pos,
	size_t ocount,
	int *iolock)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t ret;
	int enospc = 0;
	size_t count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * if we just got an ENOSPC, flush the inode now we aren't holding any
	 * page locks and retry *once*
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb *iocb,
	const struct iovec *iovp,
	unsigned long nr_segs,
	loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t ret;
	int iolock;
	size_t ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error;

		xfs_rw_iunlock(ip, iolock);
		error = xfs_file_fsync(file, pos, end,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
		xfs_rw_ilock(ip, iolock);
		if (error)
			ret = error;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file *file,
	int mode,
	loff_t offset,
	loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long error;
	loff_t new_size = 0;
	xfs_flock64_t bf;
	xfs_inode_t *ip = XFS_I(inode);
	int cmd = XFS_IOC_RESVSP;
	int attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

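/*
 * Reject opens of files too large for this process (no O_LARGEFILE) and
 * anything on a filesystem that has been shut down.
 */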
STATIC int
xfs_file_open(
	struct inode *inode,
	struct file *file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode *inode,
	struct file *file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int mode;
	int error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

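/*
 * ->release is called when a reference to the file is dropped; hand off to
 * the core XFS code to perform any per-release cleanup work needed.
 */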
STATIC int
xfs_file_release(
	struct inode *inode,
	struct file *filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file *filp,
	void *dirent,
	filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	xfs_inode_t *ip = XFS_I(inode);
	int error;
	size_t bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
			    (xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file *filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct *vma,
	struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = xfs_file_aio_read,
	.aio_write = xfs_file_aio_write,
	.splice_read = xfs_file_splice_read,
	.splice_write = xfs_file_splice_write,
	.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xfs_file_compat_ioctl,
#endif
	.mmap = xfs_file_mmap,
	.open = xfs_file_open,
	.release = xfs_file_release,
	.fsync = xfs_file_fsync,
	.fallocate = xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open = xfs_dir_open,
	.read = generic_read_dir,
	.readdir = xfs_file_readdir,
	.llseek = generic_file_llseek,
	.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xfs_file_compat_ioctl,
#endif
	.fsync = xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = xfs_vm_page_mkwrite,
};

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode *ip,
	loff_t pos,
	long long int len)
{
	struct xfs_mount *mp = ip->i_mount;
	uint64_t mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64 rextbytes;
			u32 mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file *file,
	loff_t start,
	loff_t end,
	int datasync)
{
	struct xfs_inode *ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

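/*
 * Work out the log commit sequence we need to force to cover the inode's
 * dirty state.  Returns 0 if the inode is unpinned, or if an fdatasync only
 * has timestamp updates to cover and so nothing needs to be forced.
 */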
static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode *ip,
	bool datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field.  This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode *ip,
	bool datasync,
	int *log_flushed)
{
	int error = 0;
	xfs_csn_t seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file *file,
	loff_t start,
	loff_t end,
	int datasync)
{
	struct xfs_inode *ip = XFS_I(file->f_mapping->host);
	struct xfs_mount *mp = ip->i_mount;
	int error, err2;
	int log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}

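/*
 * Take an inode lock for an iocb, backing off with -EAGAIN rather than
 * sleeping if the caller asked for non-blocking semantics (IOCB_NOWAIT).
 */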
static int
xfs_ilock_iocb(
	struct kiocb *iocb,
	unsigned int lock_mode)
{
	struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb *iocb,
	struct iov_iter *to)
{
	struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb *iocb,
	struct iov_iter *to)
{
	struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb *iocb,
	struct iov_iter *to)
{
	struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

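/* Dispatch a read to the DAX, direct I/O or buffered I/O path. */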
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb *iocb,
	struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct xfs_mount *mp = XFS_I(inode)->i_mount;
	ssize_t ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb *iocb,
	struct iov_iter *from,
	unsigned int *iolock)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t error = 0;
	size_t count = iov_iter_count(from);
	bool drained_dio = false;
	loff_t isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the iolock
	 * shared, we need to update it to exclusive which implies having to
	 * redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO completions
	 * here.  We want to make sure that nobody is changing the size while we
	 * do this check until we have placed an IO barrier (i.e. hold the
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 *
	 * We can do an unlocked check here safely as IO completion can only
	 * extend EOF.  Truncate is locked out at this point, so the EOF can
	 * not move backwards, only forwards.  Hence we only need to take the
	 * slow path and spin locks when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain.  Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

out:
	return kiocb_modified(iocb);
}

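/*
 * Direct I/O write completion handler: finish any copy-on-write remapping,
 * convert unwritten extents, and extend the on-disk file size if the write
 * went beyond it.
 */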
static int
xfs_dio_write_end_io(
	struct kiocb *iocb,
	ssize_t size,
	int error,
	unsigned flags)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct xfs_inode *ip = XFS_I(inode);
	loff_t offset = iocb->ki_pos;
	unsigned int nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size.  Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size.  We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF.  Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock.  If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this.  If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io = xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes.
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode *ip,
	struct kiocb *iocb,
	struct iov_iter *from)
{
	unsigned int iolock = XFS_IOLOCK_SHARED;
	ssize_t ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0, NULL, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode *ip,
	struct kiocb *iocb,
	struct iov_iter *from)
{
	size_t isize = i_size_read(VFS_I(ip));
	size_t count = iov_iter_count(from);
	unsigned int iolock = XFS_IOLOCK_SHARED;
	unsigned int flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
retry_exclusive:
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags, NULL, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons.  If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

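/*
 * Direct I/O writes: check the device sector alignment, then hand off to the
 * block-aligned or block-unaligned submission path as appropriate.
 */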
static ssize_t
xfs_file_dio_write(
	struct kiocb *iocb,
	struct iov_iter *from)
{
	struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg *target = xfs_inode_buftarg(ip);
	size_t count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}

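/*
 * DAX writes bypass the page cache, but we still have to do the in-core and
 * on-disk file size updates for extending writes ourselves.
 */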
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb *iocb,
	struct iov_iter *from)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	unsigned int iolock = XFS_IOLOCK_EXCL;
	ssize_t ret, error = 0;
	loff_t pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

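/*
 * Buffered writes go through the iomap infrastructure.  On ENOSPC or EDQUOT
 * we back off, try to free up quota blocks or speculative preallocations,
 * and retry the write once before giving up.
 */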
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb *iocb,
	struct iov_iter *from)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t ret;
	bool cleared_space = false;
	unsigned int iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error.  In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space.  This reduces the chances that the eofblocks scan
	 * waits on dirty mappings.  Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb *iocb,
	struct iov_iter *from)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t ret;
	size_t ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}

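/*
 * Drop the mmap lock while we sleep waiting for a busy DAX page to be
 * released, then retake it so the layout break can be retried.
 */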
static void
xfs_wait_dax_page(
	struct inode *inode)
{
	struct xfs_inode *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

int
xfs_break_dax_layouts(
	struct inode *inode,
	bool *retry)
{
	struct page *page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

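/*
 * Break any layouts (busy DAX pages and/or leased pNFS layouts) that would
 * conflict with the operation indicated by @reason, looping until nothing
 * conflicting remains.
 */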
int
xfs_break_layouts(
	struct inode *inode,
	uint *iolock,
	enum layout_break_reason reason)
{
	bool retry;
	int error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode *ip = XFS_I(file_inode(filp));

	if (xfs_has_wsync(ip->i_mount))
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

#define XFS_FALLOC_FL_SUPPORTED \
	(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
	 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
	 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that
	 * follow require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now that AIO and DIO have drained, we flush and (if necessary)
	 * invalidate the cached range over the first operation we are about
	 * to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the
	 * range of invalidation for the shift operations is much larger, we
	 * still do the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend
	 * the file first, which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	error = file_modified(file);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it would effectively be a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range. We use a hole
			 * punch rather than unwritten extent conversion for
			 * two reasons:
			 *
			 * 1.) Hole punch handles partial block zeroing for us.
			 * 2.) If prealloc returns ENOSPC, the file range is
			 * still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * In always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len);
			if (error)
				goto out_unlock;
		}
	}

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been updated so
	 * that if we crash during the operation we don't leave shifted
	 * extents past EOF and hence lose access to the data that is
	 * contained within them.
	 */
	if (do_file_insert) {
		error = xfs_insert_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
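
/*
 * Illustrative only (not kernel code): the fallocate(2) modes handled above,
 * as seen from userspace.  Error handling elided.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Punch a 1MiB hole at offset 0 without changing the file size.
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *	// Zero a range; may extend the file size unless KEEP_SIZE is set.
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20);
 *
 *	// Collapse out an aligned range, shrinking the file.
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 1 << 20);
 */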

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops.
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}
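
/*
 * Illustrative only: the WILLNEED path above is reached via posix_fadvise(2),
 * e.g. to prefetch the first 16MiB of a file into the page cache:
 *
 *	#include <fcntl.h>
 *
 *	posix_fadvise(fd, 0, 16 << 20, POSIX_FADV_WILLNEED);
 */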

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}
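
/*
 * Illustrative only: ->remap_file_range is driven from userspace via the
 * FICLONE/FICLONERANGE ioctls (and by copy_file_range(2) when the kernel
 * elects to reflink).  A whole-file clone, assuming both fds live on the
 * same reflink-enabled XFS filesystem:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int clone_file(int dest_fd, int src_fd)
 *	{
 *		return ioctl(dest_fd, FICLONE, src_fd);
 *	}
 */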

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
	return generic_file_open(inode, file);
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	unsigned int	mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}
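
/*
 * Illustrative only: the ->iterate_shared path above ultimately services the
 * getdents64(2) syscall, which glibc wraps as readdir(3):
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *
 *	DIR *dir = opendir(".");
 *	struct dirent *de;
 *
 *	while ((de = readdir(dir)) != NULL)
 *		puts(de->d_name);
 *	closedir(dir);
 */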

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
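
/*
 * Illustrative only: SEEK_HOLE/SEEK_DATA let userspace walk the allocated
 * regions of a sparse file, e.g. to find the first hole and the first data
 * extent from the start of the file:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t first_hole = lseek(fd, 0, SEEK_HOLE);
 *	off_t first_data = lseek(fd, 0, SEEK_DATA);
 */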

#ifdef CONFIG_FS_DAX
static inline vm_fault_t
xfs_dax_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault,
	pfn_t			*pfn)
{
	return dax_iomap_fault(vmf, pe_size, pfn, NULL,
			(write_fault && !vmf->cow_page) ?
				&xfs_dax_write_iomap_ops :
				&xfs_read_iomap_ops);
}
#else
static inline vm_fault_t
xfs_dax_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault,
	pfn_t			*pfn)
{
	ASSERT(0);
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	if (IS_DAX(inode)) {
		pfn_t pfn;

		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	} else {
		if (write_fault) {
			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
			ret = iomap_page_mkwrite(vmf,
					&xfs_page_mkwrite_iomap_ops);
			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		} else {
			ret = filemap_fault(vmf);
		}
	}

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t		ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
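
/*
 * Illustrative only: the daxdev_mapping_supported() check above fires when
 * userspace requests a synchronous mapping with MAP_SYNC, which must be
 * paired with MAP_SHARED_VALIDATE so that unsupported flags are rejected
 * rather than silently ignored:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 */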

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};