// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

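/*
 * Direct I/O is not supported for every inode: encrypted and verity
 * files, inodes in data=journal mode, and inodes with inline data all
 * force the read/write paths below to fall back to buffered I/O.
 */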
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

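/*
 * Direct read path: take the shared inode lock (honoring IOCB_NOWAIT)
 * and hand the request to iomap, falling back to buffered I/O when the
 * inode cannot do direct I/O.
 */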
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fallback to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
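/*
 * DAX read path: data is copied synchronously from the device's direct
 * mapping via dax_iomap_rw(), bypassing the page cache entirely.
 */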
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

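/*
 * Top-level ->read_iter(): dispatches to the DAX, direct or buffered
 * read path based on the inode and the kiocb flags.
 */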
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err==len' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

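/*
 * Common write checks: refuse writes to immutable inodes, run the VFS
 * generic_write_checks(), and clamp the iov_iter to s_bitmap_maxbytes
 * for indirect-mapped (non-extent) files, whose size limit is lower.
 */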
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

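/*
 * Write checks used by the buffered and DAX write paths: the size
 * checks above plus file_modified(), which updates the timestamps and
 * strips SUID/SGID bits.
 */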
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

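/*
 * Buffered write path: the whole write runs under the exclusive inode
 * lock, so IOCB_NOWAIT cannot be honored and is rejected up front.
 */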
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

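/*
 * Finish off a write that may have extended the file: update i_size and
 * i_disksize under a journal handle, remove the inode from the orphan
 * list once the sizes are consistent, and truncate away any blocks that
 * were allocated beyond what was actually written.
 */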
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try to
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

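/*
 * iomap direct I/O completion callback: convert any unwritten extents
 * covered by a successful write to written so the new data becomes
 * visible to readers.
 */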
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared inode lock and then see
 * if any condition requires an exclusive one. If so, we restart the
 * whole operation by releasing the shared lock and acquiring the
 * exclusive lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since
 *   updating the inode's i_disksize and/or the orphan list requires the
 *   exclusive lock.
 *
 * - In practice, shared locking therefore survives only for overwrites;
 *   otherwise we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

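/*
 * Direct write path: choose shared or exclusive locking per the rules
 * above, put the inode on the orphan list before an extending write,
 * and fall back to buffered I/O for anything iomap could not complete
 * directly.
 */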
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with shared inode lock unless it is
	 * unaligned IO which needs exclusive lock anyways.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fallback to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized with respect to each other,
	 * as zeroing of partial blocks by two competing unaligned IOs can
	 * result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may anyway become a no-op, since we
	 * start with an exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is in an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the remainder of the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
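/*
 * DAX write path: runs entirely under the exclusive inode lock; an
 * extending write is protected by an orphan-list entry, handled the
 * same way as in the direct I/O path above.
 */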
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

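/*
 * Top-level ->write_iter(): dispatches to the DAX, direct or buffered
 * write path.
 */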
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);

	return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

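/*
 * Select the DAX or regular fault handlers at mmap time. MAP_SYNC
 * mappings are refused unless the backing dax_device actually supports
 * synchronous faults.
 */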
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if underneath dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

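/*
 * ->open(): sample the mount point into the superblock, run the fscrypt
 * and fsverity open hooks, attach the jbd2 inode for writers, and allow
 * RWF_NOWAIT I/O on the file.
 */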
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};