/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_iolock and ip->i_lock, in that order.
 */
static inline void
xfs_rw_ilock(
        struct xfs_inode        *ip,
        int                     type)
{
        if (type & XFS_IOLOCK_EXCL)
                mutex_lock(&VFS_I(ip)->i_mutex);
        xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_iunlock(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_ilock_demote(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode */
        loff_t                  pos,    /* offset in file */
        size_t                  count)  /* size of data to zero */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

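        /*
         * pagecache_write_begin() hands back a negative errno on failure,
         * so the negation below converts it to the positive error
         * convention used by the XFS internals (and documented by callers
         * such as xfs_zero_last_block()).
         */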
        return (-status);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_lsn_t               lsn = 0;

        trace_xfs_dir_fsync(ip);

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip))
                lsn = ip->i_itemp->ili_last_lsn;
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!lsn)
                return 0;
        return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error = 0;
        int                     log_flushed = 0;
        xfs_lsn_t               lsn = 0;

        trace_xfs_file_fsync(ip);

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        if (mp->m_flags & XFS_MOUNT_BARRIER) {
                /*
                 * If we have an RT and/or log subvolume we need to make sure
                 * to flush the write cache of the device used for file data
                 * first.  This is to ensure newly written file data makes
                 * it to disk before logging the new inode size in case of
                 * an extending write.
                 */
                if (XFS_IS_REALTIME_INODE(ip))
                        xfs_blkdev_issue_flush(mp->m_rtdev_targp);
                else if (mp->m_logdev_targp != mp->m_ddev_targp)
                        xfs_blkdev_issue_flush(mp->m_ddev_targp);
        }

        /*
         * All metadata updates are logged, which means that we just have
         * to flush the log up to the latest LSN that touched the inode.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
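        /*
         * For fdatasync we can skip the log force if the only pinned
         * changes are pure timestamp updates; ili_fields records which
         * inode fields the pending transactions have dirtied.
         */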
        if (xfs_ipincount(ip)) {
                if (!datasync ||
                    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
                        lsn = ip->i_itemp->ili_last_lsn;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (lsn)
                error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

        /*
         * If we only have a single device, and the log force above was
         * a no-op, we might have to flush the data device cache here.
         * This can only happen for fdatasync/O_DSYNC if we were overwriting
         * an already allocated file and thus do not have any metadata to
         * commit.
         */
        if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
            mp->m_logdev_targp == mp->m_ddev_targp &&
            !XFS_IS_REALTIME_INODE(ip) &&
            !log_flushed)
                xfs_blkdev_issue_flush(mp->m_ddev_targp);

        return -error;
}

STATIC ssize_t
xfs_file_aio_read(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        int                     ioflags = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        BUG_ON(iocb->ki_pos != pos);

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return -XFS_ERROR(EINVAL);
        }
        /* END copy & waste from filemap.c */

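        /*
         * Direct IO has to be sector aligned in both offset and length; the
         * one exception is a read starting exactly at EOF, which returns 0
         * to match normal read() semantics instead of failing with EINVAL.
         */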
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (iocb->ki_pos == i_size_read(inode))
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
        if (n <= 0 || size == 0)
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        /*
         * Locking is a bit tricky here.  If we take an exclusive lock
         * for direct IO, we effectively serialise all new concurrent
         * read IO to this file and block it behind IO that is currently in
         * progress because IO in progress holds the IO lock shared.  We only
         * need to hold the lock exclusive to blow away the page cache, so
         * only take the lock exclusively if the page cache needs
         * invalidation.  This allows the normal direct IO case of no page
         * cache pages to proceed concurrently without serialisation.
         */
        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
        if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
                xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

                if (inode->i_mapping->nrpages) {
                        ret = -xfs_flushinval_pages(ip,
                                        (iocb->ki_pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (ret) {
                                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                return ret;
                        }
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
        }

        trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

        ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

STATIC ssize_t
xfs_file_splice_read(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        unsigned int            flags)
{
        struct xfs_inode        *ip = XFS_I(infilp->f_mapping->host);
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);

        if (infilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        unsigned int            flags)
{
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_write_calls);

        if (outfilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        struct xfs_inode        *ip,
        xfs_fsize_t             offset,
        xfs_fsize_t             isize)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           last_fsb = XFS_B_TO_FSBT(mp, isize);
        int                     zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        int                     zero_len;
        int                     nimaps = 1;
        int                     error = 0;
        struct xfs_bmbt_irec    imap;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        ASSERT(nimaps > 0);

        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK)
                return 0;

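        /*
         * For example, with 4k blocks, isize = 6k and offset = 10k:
         * zero_offset is 2k, so zero_len starts out as 2k and we zero the
         * 2k between isize (6k) and the next block boundary (8k).  Only if
         * the new EOF lands inside that same block is zero_len clamped.
         */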
        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on-disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int                                     /* error (positive) */
xfs_zero_eof(
        struct xfs_inode        *ip,
        xfs_off_t               offset,         /* starting I/O offset */
        xfs_fsize_t             isize)          /* current inode size */
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_zero_fsb;
        xfs_fileoff_t           end_zero_fsb;
        xfs_fileoff_t           zero_count_fsb;
        xfs_fileoff_t           last_fsb;
        xfs_fileoff_t           zero_off;
        xfs_fsize_t             zero_len;
        int                     nimaps;
        int                     error = 0;
        struct xfs_bmbt_irec    imap;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         *
         * We only zero a part of that block so it is handled specially.
         */
        if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
                error = xfs_zero_last_block(ip, offset, isize);
                if (error)
                        return error;
        }

        /*
         * Calculate the range between the new size and the old where blocks
         * needing to be zeroed may exist.
         *
         * To get the block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back to a block
         * boundary.  We subtract 1 in case the size is exactly on a block
         * boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
                                          &imap, &nimaps, 0);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 */
                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error)
                        return error;

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
        }

        return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
        struct file             *file,
        loff_t                  *pos,
        size_t                  *count,
        int                     *iolock)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     error = 0;

restart:
        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error)
                return error;

        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
         * write.  If zeroing is needed and we are currently holding the
         * iolock shared, we need to update it to exclusive, which implies
         * redoing all the checks done above.
         */
        if (*pos > i_size_read(inode)) {
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, *iolock);
                        goto restart;
                }
                error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
                if (error)
                        return error;
        }

        /*
         * Updating the timestamps will grab the ilock again from
         * xfs_fs_dirty_inode, so we have to call it after dropping the
         * lock above.  Eventually we should look into a way to avoid
         * the pointless lock roundtrip.
         */
        if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
                error = file_update_time(file);
                if (error)
                        return error;
        }

        /*
         * If we're writing the file then make sure to clear the setuid and
         * setgid bits if the process is not being run by root.  This keeps
         * people from modifying setuid and setgid binaries.
         */
        return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
        size_t                  count = ocount;
        int                     unaligned_io = 0;
        int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;

        if ((pos & target->bt_smask) || (count & target->bt_smask))
                return -XFS_ERROR(EINVAL);

        if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
                unaligned_io = 1;
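        /*
         * Note that sector aligned IO can still be block unaligned: e.g. a
         * 512 byte write at offset 512 on a 4k block filesystem passes the
         * bt_smask check above but still needs the sub-block serialisation.
         */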

        /*
         * We don't need to take an exclusive lock unless the page cache needs
         * to be invalidated or unaligned IO is being executed.  We don't need
         * to consider the EOF extension case here because
         * xfs_file_aio_write_checks() will relock the inode as necessary for
         * EOF zeroing cases and fill out the new inode size as appropriate.
         */
        if (unaligned_io || mapping->nrpages)
                iolock = XFS_IOLOCK_EXCL;
        else
                iolock = XFS_IOLOCK_SHARED;
        xfs_rw_ilock(ip, iolock);

        /*
         * Recheck if there are cached pages that need to be invalidated
         * after we got the iolock to protect against other threads adding
         * new pages while we were waiting for the iolock.
         */
        if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
                xfs_rw_iunlock(ip, iolock);
                iolock = XFS_IOLOCK_EXCL;
                xfs_rw_ilock(ip, iolock);
        }

        ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
                goto out;

        if (mapping->nrpages) {
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
                        goto out;
        }

        /*
         * If we are doing unaligned IO, wait for all other IO to drain,
         * otherwise demote the lock if we had to flush cached pages.
         */
        if (unaligned_io)
                inode_dio_wait(inode);
        else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
        }

        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
                        &nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
        xfs_rw_iunlock(ip, iolock);

        /* No fallback to buffered IO on errors for XFS. */
        ASSERT(ret < 0 || ret == count);
        return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
        size_t                  count = ocount;

        xfs_rw_ilock(ip, iolock);

        ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
                goto out;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_buffered_write(iocb, iovp, nr_segs,
                        pos, &iocb->ki_pos, count, ret);
        /*
         * If we just got an ENOSPC, flush the inode now that we aren't
         * holding any page locks and retry *once*.
         */
        if (ret == -ENOSPC && !enospc) {
                enospc = 1;
                ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
                if (!ret)
                        goto write_retry;
        }

        current->backing_dev_info = NULL;
out:
        xfs_rw_iunlock(ip, iolock);
        return ret;
}

STATIC ssize_t
xfs_file_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        size_t                  ocount = 0;

        XFS_STATS_INC(xs_write_calls);

        BUG_ON(iocb->ki_pos != pos);

        ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
        if (ret)
                return ret;

        if (ocount == 0)
                return 0;

        xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        if (unlikely(file->f_flags & O_DIRECT))
                ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
        else
                ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
                                                  ocount);

        if (ret > 0) {
                ssize_t err;

                XFS_STATS_ADD(xs_write_bytes, ret);

                /* Handle various SYNC-type writes */
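                /*
                 * generic_write_sync() is a no-op unless the file was opened
                 * O_SYNC/O_DSYNC or the inode is flagged for synchronous
                 * writes; when it does run, it ends up back in
                 * xfs_file_fsync() for the range just written.
                 */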
                err = generic_write_sync(file, pos, ret);
                if (err < 0)
                        ret = err;
        }

        return ret;
}

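/*
 * Illustrative userspace usage (not part of this file): both preallocation
 * and hole punching arrive here via fallocate(2), e.g.
 *
 *      fallocate(fd, 0, offset, len);                          (preallocate)
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *                offset, len);                                 (punch a hole)
 *
 * The VFS only allows FALLOC_FL_PUNCH_HOLE together with
 * FALLOC_FL_KEEP_SIZE, which is why punching a hole never changes the file
 * size here.
 */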
STATIC long
xfs_file_fallocate(
        struct file     *file,
        int             mode,
        loff_t          offset,
        loff_t          len)
{
        struct inode    *inode = file->f_path.dentry->d_inode;
        long            error;
        loff_t          new_size = 0;
        xfs_flock64_t   bf;
        xfs_inode_t     *ip = XFS_I(inode);
        int             cmd = XFS_IOC_RESVSP;
        int             attr_flags = XFS_ATTR_NOLOCK;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        bf.l_whence = 0;
        bf.l_start = offset;
        bf.l_len = len;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (mode & FALLOC_FL_PUNCH_HOLE)
                cmd = XFS_IOC_UNRESVSP;

        /* check the new inode size is valid before allocating */
        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            offset + len > i_size_read(inode)) {
                new_size = offset + len;
                error = inode_newsize_ok(inode, new_size);
                if (error)
                        goto out_unlock;
        }

        if (file->f_flags & O_DSYNC)
                attr_flags |= XFS_ATTR_SYNC;

        error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
        if (error)
                goto out_unlock;

        /* Change file size if needed */
        if (new_size) {
                struct iattr iattr;

                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
                error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
        }

out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}

STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *file)
{
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
        return 0;
}

STATIC int
xfs_dir_open(
        struct inode    *inode,
        struct file     *file)
{
        struct xfs_inode *ip = XFS_I(inode);
        int             mode;
        int             error;

        error = xfs_file_open(inode, file);
        if (error)
                return error;

        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * certain to have the next operation be a read there.
         */
        mode = xfs_ilock_map_shared(ip);
        if (ip->i_d.di_nextents > 0)
                xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
        xfs_iunlock(ip, mode);
        return 0;
}

STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
        struct file     *filp,
        void            *dirent,
        filldir_t       filldir)
{
        struct inode    *inode = filp->f_path.dentry->d_inode;
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;

        /*
         * The Linux API doesn't pass the total size of the buffer we read
         * into down to the filesystem.  With the filldir concept it's not
         * needed for correct information, but the XFS dir2 leaf code wants
         * an estimate of the buffer size to calculate its readahead window
         * and size the buffers used for mapping to physical blocks.
         *
         * Try to give it an estimate that's good enough, maybe at some
         * point we can change the ->readdir prototype to include the
         * buffer size.  For now we use the current glibc buffer size.
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

        error = xfs_readdir(ip, dirent, bufsize,
                                (xfs_off_t *)&filp->f_pos, filldir);
        if (error)
                return -error;
        return 0;
}

STATIC int
xfs_file_mmap(
        struct file     *filp,
        struct vm_area_struct *vma)
{
        vma->vm_ops = &xfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        file_accessed(filp);
        return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
{
        return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

STATIC loff_t
xfs_seek_data(
        struct file             *file,
        loff_t                  start,
        u32                     type)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    map[2];
        int                     nmap = 2;
        loff_t                  uninitialized_var(offset);
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
        xfs_filblks_t           end;
        uint                    lock;
        int                     error;

        lock = xfs_ilock_map_shared(ip);

        isize = i_size_read(inode);
        if (start >= isize) {
                error = ENXIO;
                goto out_unlock;
        }

        fsbno = XFS_B_TO_FSBT(mp, start);

        /*
         * Try to read extents from the first block indicated
         * by fsbno to the end block of the file.
         */
        end = XFS_B_TO_FSB(mp, isize);

        error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
                               XFS_BMAPI_ENTIRE);
        if (error)
                goto out_unlock;

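        /*
         * We asked for two mappings above so that if the extent covering
         * 'start' is a hole we can land on the following data extent
         * without a second xfs_bmapi_read() call.
         */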
        /*
         * Treat an unwritten extent as a data extent since it might
         * contain dirty data in the page cache.
         */
        if (map[0].br_startblock != HOLESTARTBLOCK) {
                offset = max_t(loff_t, start,
                               XFS_FSB_TO_B(mp, map[0].br_startoff));
        } else {
                if (nmap == 1) {
                        error = ENXIO;
                        goto out_unlock;
                }

                offset = max_t(loff_t, start,
                               XFS_FSB_TO_B(mp, map[1].br_startoff));
        }

        if (offset != file->f_pos)
                file->f_pos = offset;

out_unlock:
        xfs_iunlock_map_shared(ip, lock);

        if (error)
                return -error;
        return offset;
}

STATIC loff_t
xfs_seek_hole(
        struct file             *file,
        loff_t                  start,
        u32                     type)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        loff_t                  uninitialized_var(offset);
        loff_t                  holeoff;
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
        uint                    lock;
        int                     error;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);

        lock = xfs_ilock_map_shared(ip);

        isize = i_size_read(inode);
        if (start >= isize) {
                error = ENXIO;
                goto out_unlock;
        }

        fsbno = XFS_B_TO_FSBT(mp, start);
        error = xfs_bmap_first_unused(NULL, ip, 1, &fsbno, XFS_DATA_FORK);
        if (error)
                goto out_unlock;

        holeoff = XFS_FSB_TO_B(mp, fsbno);
        if (holeoff <= start)
                offset = start;
        else {
                /*
                 * xfs_bmap_first_unused() could return a value bigger than
                 * isize if there are no more holes past the supplied offset.
                 */
                offset = min_t(loff_t, holeoff, isize);
        }

        if (offset != file->f_pos)
                file->f_pos = offset;

out_unlock:
        xfs_iunlock_map_shared(ip, lock);

        if (error)
                return -error;
        return offset;
}

STATIC loff_t
xfs_file_llseek(
        struct file     *file,
        loff_t          offset,
        int             origin)
{
        switch (origin) {
        case SEEK_END:
        case SEEK_CUR:
        case SEEK_SET:
                return generic_file_llseek(file, offset, origin);
        case SEEK_DATA:
                return xfs_seek_data(file, offset, origin);
        case SEEK_HOLE:
                return xfs_seek_hole(file, offset, origin);
        default:
                return -EINVAL;
        }
}
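
/*
 * Illustrative userspace usage of the SEEK_DATA/SEEK_HOLE support above
 * (not part of this file):
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);     (scan to the first data)
 *      off_t hole = lseek(fd, data, SEEK_HOLE);  (then to the next hole)
 *
 * lseek() fails with ENXIO once the offset is at or past EOF, which is how
 * sparse-file aware tools detect the end of the extent walk.
 */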

const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = xfs_file_aio_read,
        .aio_write      = xfs_file_aio_write,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
        .fallocate      = xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
        .open           = xfs_dir_open,
        .read           = generic_read_dir,
        .readdir        = xfs_file_readdir,
        .llseek         = generic_file_llseek,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .fsync          = xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = xfs_vm_page_mkwrite,
};