v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/ext4/file.c
  4 *
  5 * Copyright (C) 1992, 1993, 1994, 1995
  6 * Remy Card (card@masi.ibp.fr)
  7 * Laboratoire MASI - Institut Blaise Pascal
  8 * Universite Pierre et Marie Curie (Paris VI)
  9 *
 10 *  from
 11 *
 12 *  linux/fs/minix/file.c
 13 *
 14 *  Copyright (C) 1991, 1992  Linus Torvalds
 15 *
 16 *  ext4 fs regular file handling primitives
 17 *
 18 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 19 *	(jj@sunsite.ms.mff.cuni.cz)
 20 */
 21
 22#include <linux/time.h>
 23#include <linux/fs.h>
 24#include <linux/iomap.h>
 25#include <linux/mount.h>
 26#include <linux/path.h>
 27#include <linux/dax.h>
 28#include <linux/quotaops.h>
 29#include <linux/pagevec.h>
 30#include <linux/uio.h>
 31#include <linux/mman.h>
 32#include <linux/backing-dev.h>
 33#include "ext4.h"
 34#include "ext4_jbd2.h"
 35#include "xattr.h"
 36#include "acl.h"
 37#include "truncate.h"
 38
 39/*
 40 * Returns %true if the given DIO request should be attempted with DIO, or
 41 * %false if it should fall back to buffered I/O.
 42 *
 43 * DIO isn't well specified; when it's unsupported (either due to the request
 44 * being misaligned, or due to the file not supporting DIO at all), filesystems
 45 * either fall back to buffered I/O or return EINVAL.  For files that don't use
 46 * any special features like encryption or verity, ext4 has traditionally
 47 * returned EINVAL for misaligned DIO.  iomap_dio_rw() uses this convention too.
 48 * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
 49 *
 50 * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
 51 * traditionally falls back to buffered I/O.
 52 *
 53 * This function implements the traditional ext4 behavior in all these cases.
 54 */
 55static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
 56{
 57	struct inode *inode = file_inode(iocb->ki_filp);
 58	u32 dio_align = ext4_dio_alignment(inode);
 59
 60	if (dio_align == 0)
 61		return false;
 62
 63	if (dio_align == 1)
 64		return true;
 65
 66	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
 67}
 68
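A minimal userspace sketch (not part of this file) of the convention described above: a misaligned O_DIRECT request is rejected with EINVAL rather than silently handled as buffered I/O. It assumes the filesystem's DIO alignment divides 4096.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "dio-test";
	void *buf;
	int fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);

	/* Aligned offset and aligned buffer: the DIO path is attempted. */
	if (pwrite(fd, buf, 4096, 0) < 0)
		perror("aligned O_DIRECT write");

	/* Misaligned offset: per the convention above, ext4 returns
	 * EINVAL instead of falling back to buffered I/O. */
	if (pwrite(fd, buf, 4096, 1) < 0 && errno == EINVAL)
		fprintf(stderr, "misaligned DIO rejected with EINVAL\n");

	free(buf);
	close(fd);
	return 0;
}
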
 69static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
 70{
 71	ssize_t ret;
 72	struct inode *inode = file_inode(iocb->ki_filp);
 73
 74	if (iocb->ki_flags & IOCB_NOWAIT) {
 75		if (!inode_trylock_shared(inode))
 76			return -EAGAIN;
 77	} else {
 78		inode_lock_shared(inode);
 79	}
 80
 81	if (!ext4_should_use_dio(iocb, to)) {
 82		inode_unlock_shared(inode);
 83		/*
 84		 * Fall back to buffered I/O if the operation being performed on
 85		 * the inode is not supported by direct I/O. The IOCB_DIRECT
 86		 * flag needs to be cleared here in order to ensure that the
 87		 * direct I/O path within generic_file_read_iter() is not
 88		 * taken.
 89		 */
 90		iocb->ki_flags &= ~IOCB_DIRECT;
 91		return generic_file_read_iter(iocb, to);
 92	}
 93
 94	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
 95	inode_unlock_shared(inode);
 96
 97	file_accessed(iocb->ki_filp);
 98	return ret;
 99}
100
101#ifdef CONFIG_FS_DAX
102static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
103{
104	struct inode *inode = file_inode(iocb->ki_filp);
105	ssize_t ret;
106
107	if (iocb->ki_flags & IOCB_NOWAIT) {
108		if (!inode_trylock_shared(inode))
109			return -EAGAIN;
110	} else {
111		inode_lock_shared(inode);
112	}
113	/*
114	 * Recheck under inode lock - at this point we are sure it cannot
115	 * change anymore
116	 */
117	if (!IS_DAX(inode)) {
118		inode_unlock_shared(inode);
119		/* Fall back to buffered IO in case we cannot support DAX */
120		return generic_file_read_iter(iocb, to);
121	}
122	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
123	inode_unlock_shared(inode);
124
125	file_accessed(iocb->ki_filp);
126	return ret;
127}
128#endif
129
130static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
131{
132	struct inode *inode = file_inode(iocb->ki_filp);
133
134	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
135		return -EIO;
136
137	if (!iov_iter_count(to))
138		return 0; /* skip atime */
139
140#ifdef CONFIG_FS_DAX
141	if (IS_DAX(inode))
142		return ext4_dax_read_iter(iocb, to);
143#endif
144	if (iocb->ki_flags & IOCB_DIRECT)
145		return ext4_dio_read_iter(iocb, to);
146
147	return generic_file_read_iter(iocb, to);
148}
149
150static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos,
151				     struct pipe_inode_info *pipe,
152				     size_t len, unsigned int flags)
153{
154	struct inode *inode = file_inode(in);
155
156	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
157		return -EIO;
158	return filemap_splice_read(in, ppos, pipe, len, flags);
159}
160
161/*
162 * Called when an inode is released. Note that this is different
163 * from ext4_file_open: open gets called at every open, but release
164 * gets called only when /all/ the files are closed.
165 */
166static int ext4_release_file(struct inode *inode, struct file *filp)
167{
168	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
169		ext4_alloc_da_blocks(inode);
170		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
171	}
172	/* if we are the last writer on the inode, drop the block reservation */
173	if ((filp->f_mode & FMODE_WRITE) &&
174			(atomic_read(&inode->i_writecount) == 1) &&
175			!EXT4_I(inode)->i_reserved_data_blocks) {
176		down_write(&EXT4_I(inode)->i_data_sem);
177		ext4_discard_preallocations(inode);
178		up_write(&EXT4_I(inode)->i_data_sem);
179	}
180	if (is_dx(inode) && filp->private_data)
181		ext4_htree_free_dir_info(filp->private_data);
182
183	return 0;
184}
185
186/*
187 * This tests whether the IO in question is block-aligned or not.
188 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
189 * are converted to written only after the IO is complete.  Until they are
190 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
191 * it needs to zero out portions of the start and/or end block.  If 2 AIO
192 * threads are at work on the same unwritten block, they must be synchronized
193 * or one thread will zero the other's data, causing corruption.
194 */
195static bool
196ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
197{
198	struct super_block *sb = inode->i_sb;
199	unsigned long blockmask = sb->s_blocksize - 1;
200
201	if ((pos | iov_iter_alignment(from)) & blockmask)
202		return true;
203
204	return false;
205}
206
207static bool
208ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
209{
210	if (offset + len > i_size_read(inode) ||
211	    offset + len > EXT4_I(inode)->i_disksize)
212		return true;
213	return false;
214}
215
216/* Is IO overwriting allocated or initialized blocks? */
217static bool ext4_overwrite_io(struct inode *inode,
218			      loff_t pos, loff_t len, bool *unwritten)
219{
220	struct ext4_map_blocks map;
221	unsigned int blkbits = inode->i_blkbits;
222	int err, blklen;
223
224	if (pos + len > i_size_read(inode))
225		return false;
226
227	map.m_lblk = pos >> blkbits;
228	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
229	blklen = map.m_len;
230
231	err = ext4_map_blocks(NULL, inode, &map, 0);
232	if (err != blklen)
233		return false;
234	/*
235	 * 'err == blklen' means that all of the blocks have been preallocated,
236	 * regardless of whether they have been initialized or not. We need to
237	 * check m_flags to distinguish the unwritten extents.
238	 */
239	*unwritten = !(map.m_flags & EXT4_MAP_MAPPED);
240	return true;
241}
242
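The mapped-versus-unwritten distinction that ext4_overwrite_io() reads from m_flags is also observable from userspace via the FIEMAP ioctl. A rough sketch, assuming only the file's first extent is of interest:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	struct fiemap *fm;

	if (fd < 0)
		return 1;
	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = 1;	/* room for one extent record */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents) {
		/* FIEMAP_EXTENT_UNWRITTEN marks preallocated space that
		 * reads back as zeroes, i.e. not an initialized overwrite
		 * target in the sense used above. */
		printf("extent at %llu is %s\n",
		       (unsigned long long)fm->fm_extents[0].fe_logical,
		       (fm->fm_extents[0].fe_flags & FIEMAP_EXTENT_UNWRITTEN) ?
				"unwritten" : "written");
	}
	free(fm);
	close(fd);
	return 0;
}
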
243static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
244					 struct iov_iter *from)
245{
246	struct inode *inode = file_inode(iocb->ki_filp);
247	ssize_t ret;
248
249	if (unlikely(IS_IMMUTABLE(inode)))
250		return -EPERM;
251
252	ret = generic_write_checks(iocb, from);
253	if (ret <= 0)
254		return ret;
255
256	/*
257	 * If we have encountered a bitmap-format file, the size limit
258	 * is smaller than s_maxbytes, which is for extent-mapped files.
259	 */
260	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
261		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
262
263		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
264			return -EFBIG;
265		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
266	}
267
268	return iov_iter_count(from);
269}
270
271static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
272{
273	ssize_t ret, count;
274
275	count = ext4_generic_write_checks(iocb, from);
276	if (count <= 0)
277		return count;
278
279	ret = file_modified(iocb->ki_filp);
280	if (ret)
281		return ret;
282	return count;
283}
284
285static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
286					struct iov_iter *from)
287{
288	ssize_t ret;
289	struct inode *inode = file_inode(iocb->ki_filp);
290
291	if (iocb->ki_flags & IOCB_NOWAIT)
292		return -EOPNOTSUPP;
293
294	inode_lock(inode);
295	ret = ext4_write_checks(iocb, from);
296	if (ret <= 0)
297		goto out;
298
299	ret = generic_perform_write(iocb, from);
300
301out:
302	inode_unlock(inode);
303	if (unlikely(ret <= 0))
304		return ret;
305	return generic_write_sync(iocb, ret);
306}
307
308static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
309					   ssize_t written, ssize_t count)
310{
311	handle_t *handle;
312
313	lockdep_assert_held_write(&inode->i_rwsem);
314	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
315	if (IS_ERR(handle))
316		return PTR_ERR(handle);
317
318	if (ext4_update_inode_size(inode, offset + written)) {
319		int ret = ext4_mark_inode_dirty(handle, inode);
320		if (unlikely(ret)) {
321			ext4_journal_stop(handle);
322			return ret;
323		}
324	}
325
326	if ((written == count) && inode->i_nlink)
327		ext4_orphan_del(handle, inode);
328	ext4_journal_stop(handle);
329
330	return written;
331}
332
333/*
334 * Clean up the inode after DIO or DAX extending write has completed and the
335 * inode size has been updated using ext4_handle_inode_extension().
336 */
337static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
338{
339	lockdep_assert_held_write(&inode->i_rwsem);
340	if (need_trunc) {
341		ext4_truncate_failed_write(inode);
342		/*
343		 * If the truncate operation failed early, then the inode may
344		 * still be on the orphan list. In that case, we need to try to
345		 * remove the inode from the in-memory linked list.
346		 */
347		if (inode->i_nlink)
348			ext4_orphan_del(NULL, inode);
349		return;
350	}
351	/*
352	 * If i_disksize got extended either due to writeback of delalloc
353	 * blocks or extending truncate while the DIO was running we could fail
354	 * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
355	 * now.
356	 */
357	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
358		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
359
360		if (IS_ERR(handle)) {
361			/*
362			 * The write has successfully completed. Not much to
363			 * do with the error here so just cleanup the orphan
364			 * list and hope for the best.
365			 */
366			ext4_orphan_del(NULL, inode);
367			return;
368		}
369		ext4_orphan_del(handle, inode);
370		ext4_journal_stop(handle);
371	}
372}
373
374static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
375				 int error, unsigned int flags)
376{
377	loff_t pos = iocb->ki_pos;
378	struct inode *inode = file_inode(iocb->ki_filp);
379
380	if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
381		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
382	if (error)
383		return error;
384	/*
385	 * Note that EXT4_I(inode)->i_disksize can get extended up to
386	 * inode->i_size while the I/O was running due to writeback of delalloc
387	 * blocks. But the code in ext4_iomap_alloc() is careful to use
388	 * zeroed/unwritten extents if this is possible; thus we won't leave
389	 * uninitialized blocks in a file even if we didn't succeed in writing
390	 * as much as we intended. Also we can race with truncate or write
391	 * expanding the file so we have to be a bit careful here.
392	 */
393	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
394	    pos + size <= i_size_read(inode))
395		return 0;
396	error = ext4_handle_inode_extension(inode, pos, size, size);
397	return error < 0 ? error : 0;
398}
399
400static const struct iomap_dio_ops ext4_dio_write_ops = {
401	.end_io = ext4_dio_write_end_io,
402};
403
404/*
405 * The intention here is to start with shared lock acquired then see if any
406 * condition requires an exclusive inode lock. If yes, then we restart the
407 * whole operation by releasing the shared lock and acquiring exclusive lock.
408 *
409 * - For unaligned_io we never take shared lock as it may cause data corruption
410 *   when two unaligned IOs try to modify the same block, e.g. while zeroing.
411 *
412 * - For the extending-write case we don't take the shared lock, since it requires
413 *   updating inode i_disksize and/or orphan handling with exclusive lock.
414 *
415 * - Shared locking mostly applies to overwrites, of both initialized
416 *   and unwritten blocks. For overwrites of unwritten blocks, extent
417 *   splitting is protected by i_data_sem in ext4_inode_info, so the
418 *   exclusive i_rwsem lock can also be released.
419 *
420 * - Otherwise we will switch to exclusive i_rwsem lock.
421 */
422static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
423				     bool *ilock_shared, bool *extend,
424				     bool *unwritten, int *dio_flags)
425{
426	struct file *file = iocb->ki_filp;
427	struct inode *inode = file_inode(file);
428	loff_t offset;
429	size_t count;
430	ssize_t ret;
431	bool overwrite, unaligned_io;
432
433restart:
434	ret = ext4_generic_write_checks(iocb, from);
435	if (ret <= 0)
436		goto out;
437
438	offset = iocb->ki_pos;
439	count = ret;
440
441	unaligned_io = ext4_unaligned_io(inode, from, offset);
442	*extend = ext4_extending_io(inode, offset, count);
443	overwrite = ext4_overwrite_io(inode, offset, count, unwritten);
444
445	/*
446	 * Determine whether we need to upgrade to an exclusive lock. This is
447	 * required to change security info in file_modified(), for extending
448	 * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten
449	 * extents (as partial block zeroing may be required).
450	 *
451	 * Note that unaligned writes are allowed under shared lock so long as
452	 * they are pure overwrites. Otherwise, concurrent unaligned writes risk
453	 * data corruption due to partial block zeroing in the dio layer, and so
454	 * the I/O must occur exclusively.
455	 */
456	if (*ilock_shared &&
457	    ((!IS_NOSEC(inode) || *extend || !overwrite ||
458	     (unaligned_io && *unwritten)))) {
459		if (iocb->ki_flags & IOCB_NOWAIT) {
460			ret = -EAGAIN;
461			goto out;
462		}
463		inode_unlock_shared(inode);
464		*ilock_shared = false;
465		inode_lock(inode);
466		goto restart;
467	}
468
469	/*
470	 * Now that locking is settled, determine dio flags and exclusivity
471	 * requirements. We don't use DIO_OVERWRITE_ONLY because we enforce
472	 * behavior already. The inode lock is already held exclusive if the
473	 * write is non-overwrite or extending, so drain all outstanding dio and
474	 * set the force wait dio flag.
475	 */
476	if (!*ilock_shared && (unaligned_io || *extend)) {
477		if (iocb->ki_flags & IOCB_NOWAIT) {
478			ret = -EAGAIN;
479			goto out;
480		}
481		if (unaligned_io && (!overwrite || *unwritten))
482			inode_dio_wait(inode);
483		*dio_flags = IOMAP_DIO_FORCE_WAIT;
484	}
485
486	ret = file_modified(file);
487	if (ret < 0)
488		goto out;
489
490	return count;
491out:
492	if (*ilock_shared)
493		inode_unlock_shared(inode);
494	else
495		inode_unlock(inode);
496	return ret;
497}
498
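The restart pattern above, take the shared lock optimistically, run the checks, and upgrade by dropping and reacquiring, has a compact userspace analogue. A simplified sketch with POSIX rwlocks; needs_exclusive() is a hypothetical stand-in for the extend/overwrite/alignment tests:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void locked_write(bool (*needs_exclusive)(void))
{
	bool shared = true;

	pthread_rwlock_rdlock(&lock);
restart:
	if (shared && needs_exclusive()) {
		/* rwlocks cannot be upgraded in place: drop the shared
		 * lock, take the exclusive one, and re-run the checks,
		 * since the state may have changed while unlocked. */
		pthread_rwlock_unlock(&lock);
		shared = false;
		pthread_rwlock_wrlock(&lock);
		goto restart;
	}
	/* ... issue the I/O under the chosen lock mode ... */
	pthread_rwlock_unlock(&lock);
}

static bool always_shared(void)
{
	return false;
}

int main(void)
{
	locked_write(always_shared);
	return 0;
}
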
499static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
500{
501	ssize_t ret;
502	handle_t *handle;
503	struct inode *inode = file_inode(iocb->ki_filp);
504	loff_t offset = iocb->ki_pos;
505	size_t count = iov_iter_count(from);
506	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
507	bool extend = false, unwritten = false;
508	bool ilock_shared = true;
509	int dio_flags = 0;
510
511	/*
512	 * Quick check here without any i_rwsem lock to see if it is extending
513	 * IO. A more reliable check is done in ext4_dio_write_checks() with
514	 * proper locking in place.
515	 */
516	if (offset + count > i_size_read(inode))
517		ilock_shared = false;
518
519	if (iocb->ki_flags & IOCB_NOWAIT) {
520		if (ilock_shared) {
521			if (!inode_trylock_shared(inode))
522				return -EAGAIN;
523		} else {
524			if (!inode_trylock(inode))
525				return -EAGAIN;
526		}
527	} else {
528		if (ilock_shared)
529			inode_lock_shared(inode);
530		else
531			inode_lock(inode);
532	}
533
534	/* Fall back to buffered I/O if the inode does not support direct I/O. */
535	if (!ext4_should_use_dio(iocb, from)) {
536		if (ilock_shared)
537			inode_unlock_shared(inode);
538		else
539			inode_unlock(inode);
540		return ext4_buffered_write_iter(iocb, from);
541	}
542
543	/*
544	 * Prevent inline data from being created since we are going to allocate
545	 * blocks for DIO. We know the inode does not currently have inline data
546	 * because ext4_should_use_dio() checked for it, but we have to clear
547	 * the state flag before the write checks because a lock cycle could
548	 * introduce races with other writers.
549	 */
550	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
551
552	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
553				    &unwritten, &dio_flags);
554	if (ret <= 0)
555		return ret;
556
557	offset = iocb->ki_pos;
558	count = ret;
559
560	if (extend) {
561		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
562		if (IS_ERR(handle)) {
563			ret = PTR_ERR(handle);
564			goto out;
565		}
566
567		ret = ext4_orphan_add(handle, inode);
568		ext4_journal_stop(handle);
569		if (ret)
570			goto out;
571	}
572
573	if (ilock_shared && !unwritten)
574		iomap_ops = &ext4_iomap_overwrite_ops;
575	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
576			   dio_flags, NULL, 0);
577	if (ret == -ENOTBLK)
578		ret = 0;
579	if (extend) {
580		/*
581		 * We always perform extending DIO write synchronously so by
582		 * now the IO is completed and ext4_handle_inode_extension()
583		 * was called. Cleanup the inode in case of error or race with
584		 * writeback of delalloc blocks.
585		 */
586		WARN_ON_ONCE(ret == -EIOCBQUEUED);
587		ext4_inode_extension_cleanup(inode, ret < 0);
588	}
589
590out:
591	if (ilock_shared)
592		inode_unlock_shared(inode);
593	else
594		inode_unlock(inode);
595
596	if (ret >= 0 && iov_iter_count(from)) {
597		ssize_t err;
598		loff_t endbyte;
599
600		/*
601		 * There is no support for atomic writes on buffered I/O yet,
602		 * so we should never fall back to buffered I/O for atomic
603		 * DIO writes.
604		 */
605		WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC);
606
607		offset = iocb->ki_pos;
608		err = ext4_buffered_write_iter(iocb, from);
609		if (err < 0)
610			return err;
611
612		/*
613		 * We need to ensure that the pages within the page cache for
614		 * the range covered by this I/O are written to disk and
615		 * invalidated. This is an attempt to preserve the expected
616		 * direct I/O semantics in the case we fall back to buffered I/O
617		 * to complete the I/O request.
618		 */
619		ret += err;
620		endbyte = offset + err - 1;
621		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
622						   offset, endbyte);
623		if (!err)
624			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
625						 offset >> PAGE_SHIFT,
626						 endbyte >> PAGE_SHIFT);
627	}
628
629	return ret;
630}
631
632#ifdef CONFIG_FS_DAX
633static ssize_t
634ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
635{
636	ssize_t ret;
637	size_t count;
638	loff_t offset;
639	handle_t *handle;
640	bool extend = false;
641	struct inode *inode = file_inode(iocb->ki_filp);
642
643	if (iocb->ki_flags & IOCB_NOWAIT) {
644		if (!inode_trylock(inode))
645			return -EAGAIN;
646	} else {
647		inode_lock(inode);
648	}
649
650	ret = ext4_write_checks(iocb, from);
651	if (ret <= 0)
652		goto out;
653
654	offset = iocb->ki_pos;
655	count = iov_iter_count(from);
656
657	if (offset + count > EXT4_I(inode)->i_disksize) {
658		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
659		if (IS_ERR(handle)) {
660			ret = PTR_ERR(handle);
661			goto out;
662		}
663
664		ret = ext4_orphan_add(handle, inode);
665		if (ret) {
666			ext4_journal_stop(handle);
667			goto out;
668		}
669
670		extend = true;
671		ext4_journal_stop(handle);
672	}
673
674	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
675
676	if (extend) {
677		ret = ext4_handle_inode_extension(inode, offset, ret, count);
678		ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
679	}
680out:
681	inode_unlock(inode);
682	if (ret > 0)
683		ret = generic_write_sync(iocb, ret);
684	return ret;
685}
686#endif
687
688static ssize_t
689ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
690{
691	struct inode *inode = file_inode(iocb->ki_filp);
692
693	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
694		return -EIO;
695
696#ifdef CONFIG_FS_DAX
697	if (IS_DAX(inode))
698		return ext4_dax_write_iter(iocb, from);
699#endif
700
701	if (iocb->ki_flags & IOCB_ATOMIC) {
702		size_t len = iov_iter_count(from);
703		int ret;
704
705		if (len < EXT4_SB(inode->i_sb)->s_awu_min ||
706		    len > EXT4_SB(inode->i_sb)->s_awu_max)
707			return -EINVAL;
708
709		ret = generic_atomic_write_valid(iocb, from);
710		if (ret)
711			return ret;
712	}
713
714	if (iocb->ki_flags & IOCB_DIRECT)
715		return ext4_dio_write_iter(iocb, from);
716	else
717		return ext4_buffered_write_iter(iocb, from);
718}
719
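The s_awu_min/s_awu_max window validated above is what userspace exercises through the RWF_ATOMIC flag to pwritev2(). A hedged sketch, assuming a filesystem that advertises a 4096-byte atomic write unit; the fallback define carries the uapi value from include/uapi/linux/fs.h (Linux 6.11+) in case older headers lack it:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_ATOMIC
#define RWF_ATOMIC	0x00000040	/* assumed uapi value */
#endif

int main(void)
{
	void *buf;
	int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };

	/* All-or-nothing: the write fails (e.g. with EINVAL) if the
	 * kernel cannot honor atomicity for this length and offset. */
	if (pwritev2(fd, &iov, 1, 0, RWF_ATOMIC) < 0)
		perror("atomic write");
	free(buf);
	close(fd);
	return 0;
}
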
720#ifdef CONFIG_FS_DAX
721static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
722{
723	int error = 0;
724	vm_fault_t result;
725	int retries = 0;
726	handle_t *handle = NULL;
727	struct inode *inode = file_inode(vmf->vma->vm_file);
728	struct super_block *sb = inode->i_sb;
729
730	/*
731	 * We have to distinguish real writes from writes which will result in a
732	 * COW page; COW writes should *not* poke the journal (the file will not
733	 * be changed). Doing so would cause unintended failures when mounted
734	 * read-only.
735	 *
736	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
737	 * unset for order != 0 (i.e. only in do_cow_fault); for
738	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
739	 * we eventually come back with a COW page.
740	 */
741	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
742		(vmf->vma->vm_flags & VM_SHARED);
743	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
744	pfn_t pfn;
745
746	if (write) {
747		sb_start_pagefault(sb);
748		file_update_time(vmf->vma->vm_file);
749		filemap_invalidate_lock_shared(mapping);
750retry:
751		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
752					       EXT4_DATA_TRANS_BLOCKS(sb));
753		if (IS_ERR(handle)) {
754			filemap_invalidate_unlock_shared(mapping);
755			sb_end_pagefault(sb);
756			return VM_FAULT_SIGBUS;
757		}
758	} else {
759		filemap_invalidate_lock_shared(mapping);
760	}
761	result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
762	if (write) {
763		ext4_journal_stop(handle);
764
765		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
766		    ext4_should_retry_alloc(sb, &retries))
767			goto retry;
768		/* Handling synchronous page fault? */
769		if (result & VM_FAULT_NEEDDSYNC)
770			result = dax_finish_sync_fault(vmf, order, pfn);
771		filemap_invalidate_unlock_shared(mapping);
772		sb_end_pagefault(sb);
773	} else {
774		filemap_invalidate_unlock_shared(mapping);
775	}
776
777	return result;
778}
779
780static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
781{
782	return ext4_dax_huge_fault(vmf, 0);
783}
784
785static const struct vm_operations_struct ext4_dax_vm_ops = {
786	.fault		= ext4_dax_fault,
787	.huge_fault	= ext4_dax_huge_fault,
788	.page_mkwrite	= ext4_dax_fault,
789	.pfn_mkwrite	= ext4_dax_fault,
790};
791#else
792#define ext4_dax_vm_ops	ext4_file_vm_ops
793#endif
794
795static const struct vm_operations_struct ext4_file_vm_ops = {
796	.fault		= filemap_fault,
797	.map_pages	= filemap_map_pages,
798	.page_mkwrite   = ext4_page_mkwrite,
799};
800
801static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
802{
803	struct inode *inode = file->f_mapping->host;
804	struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
805
806	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
807		return -EIO;
808
809	/*
810	 * We don't support synchronous mappings for non-DAX files and
811	 * for DAX files whose underlying dax_device is not synchronous.
812	 */
813	if (!daxdev_mapping_supported(vma, dax_dev))
814		return -EOPNOTSUPP;
815
816	file_accessed(file);
817	if (IS_DAX(file_inode(file))) {
818		vma->vm_ops = &ext4_dax_vm_ops;
819		vm_flags_set(vma, VM_HUGEPAGE);
820	} else {
821		vma->vm_ops = &ext4_file_vm_ops;
822	}
823	return 0;
824}
825
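daxdev_mapping_supported() is what stands between a MAP_SYNC request and -EOPNOTSUPP here. A userspace sketch of such a request; the mount point is hypothetical, and the fallback defines carry the uapi values for libcs that do not expose them:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE	0x03
#endif
#ifndef MAP_SYNC
#define MAP_SYNC		0x80000
#endif

int main(void)
{
	int fd = open("/mnt/pmem/file", O_RDWR);	/* hypothetical DAX mount */
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap MAP_SYNC");	/* -EOPNOTSUPP from the check above */
	else
		munmap(p, 4096);
	close(fd);
	return 0;
}
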
826static int ext4_sample_last_mounted(struct super_block *sb,
827				    struct vfsmount *mnt)
828{
829	struct ext4_sb_info *sbi = EXT4_SB(sb);
830	struct path path;
831	char buf[64], *cp;
832	handle_t *handle;
833	int err;
834
835	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
836		return 0;
837
838	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
839		return 0;
840
841	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
842	/*
843	 * Sample where the filesystem has been mounted and
844	 * store it in the superblock for sysadmin convenience
845	 * when trying to sort through large numbers of block
846	 * devices or filesystem images.
847	 */
848	memset(buf, 0, sizeof(buf));
849	path.mnt = mnt;
850	path.dentry = mnt->mnt_root;
851	cp = d_path(&path, buf, sizeof(buf));
852	err = 0;
853	if (IS_ERR(cp))
854		goto out;
855
856	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
857	err = PTR_ERR(handle);
858	if (IS_ERR(handle))
859		goto out;
860	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
861	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
862					    EXT4_JTR_NONE);
863	if (err)
864		goto out_journal;
865	lock_buffer(sbi->s_sbh);
866	strtomem_pad(sbi->s_es->s_last_mounted, cp, 0);
867	ext4_superblock_csum_set(sb);
868	unlock_buffer(sbi->s_sbh);
869	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
870out_journal:
871	ext4_journal_stop(handle);
872out:
873	sb_end_intwrite(sb);
874	return err;
875}
876
877static int ext4_file_open(struct inode *inode, struct file *filp)
878{
879	int ret;
880
881	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
882		return -EIO;
883
884	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
885	if (ret)
886		return ret;
887
888	ret = fscrypt_file_open(inode, filp);
889	if (ret)
890		return ret;
891
892	ret = fsverity_file_open(inode, filp);
893	if (ret)
894		return ret;
895
896	/*
897	 * Set up the jbd2_inode if we are opening the inode for
898	 * writing and the journal is present
899	 */
900	if (filp->f_mode & FMODE_WRITE) {
901		ret = ext4_inode_attach_jinode(inode);
902		if (ret < 0)
903			return ret;
904	}
905
906	if (ext4_inode_can_atomic_write(inode))
907		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
908
909	filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
910	return dquot_file_open(inode, filp);
911}
912
913/*
914 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
915 * by calling generic_file_llseek_size() with the appropriate maxbytes
916 * value for each.
917 */
918loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
919{
920	struct inode *inode = file->f_mapping->host;
921	loff_t maxbytes;
922
923	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
924		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
925	else
926		maxbytes = inode->i_sb->s_maxbytes;
927
928	switch (whence) {
929	default:
930		return generic_file_llseek_size(file, offset, whence,
931						maxbytes, i_size_read(inode));
932	case SEEK_HOLE:
933		inode_lock_shared(inode);
934		offset = iomap_seek_hole(inode, offset,
935					 &ext4_iomap_report_ops);
936		inode_unlock_shared(inode);
937		break;
938	case SEEK_DATA:
939		inode_lock_shared(inode);
940		offset = iomap_seek_data(inode, offset,
941					 &ext4_iomap_report_ops);
942		inode_unlock_shared(inode);
943		break;
944	}
945
946	if (offset < 0)
947		return offset;
948	return vfs_setpos(file, offset, maxbytes);
949}
950
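A short userspace sketch of the SEEK_HOLE/SEEK_DATA cases handled above, probing the first data extent of a (possibly sparse) file:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "sparsefile", O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);		/* start of first data extent */
	if (data >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);	/* end of that extent */
		if (hole >= 0)
			printf("data at %lld, next hole at %lld\n",
			       (long long)data, (long long)hole);
	}
	close(fd);
	return 0;
}
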
951const struct file_operations ext4_file_operations = {
952	.llseek		= ext4_llseek,
953	.read_iter	= ext4_file_read_iter,
954	.write_iter	= ext4_file_write_iter,
955	.iopoll		= iocb_bio_iopoll,
956	.unlocked_ioctl = ext4_ioctl,
957#ifdef CONFIG_COMPAT
958	.compat_ioctl	= ext4_compat_ioctl,
959#endif
960	.mmap		= ext4_file_mmap,
961	.open		= ext4_file_open,
962	.release	= ext4_release_file,
963	.fsync		= ext4_sync_file,
964	.get_unmapped_area = thp_get_unmapped_area,
965	.splice_read	= ext4_file_splice_read,
966	.splice_write	= iter_file_splice_write,
967	.fallocate	= ext4_fallocate,
968	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
969			  FOP_DIO_PARALLEL_WRITE,
970};
971
972const struct inode_operations ext4_file_inode_operations = {
973	.setattr	= ext4_setattr,
974	.getattr	= ext4_file_getattr,
975	.listxattr	= ext4_listxattr,
976	.get_inode_acl	= ext4_get_acl,
977	.set_acl	= ext4_set_acl,
978	.fiemap		= ext4_fiemap,
979	.fileattr_get	= ext4_fileattr_get,
980	.fileattr_set	= ext4_fileattr_set,
981};
982
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/ext4/file.c
  4 *
  5 * Copyright (C) 1992, 1993, 1994, 1995
  6 * Remy Card (card@masi.ibp.fr)
  7 * Laboratoire MASI - Institut Blaise Pascal
  8 * Universite Pierre et Marie Curie (Paris VI)
  9 *
 10 *  from
 11 *
 12 *  linux/fs/minix/file.c
 13 *
 14 *  Copyright (C) 1991, 1992  Linus Torvalds
 15 *
 16 *  ext4 fs regular file handling primitives
 17 *
 18 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 19 *	(jj@sunsite.ms.mff.cuni.cz)
 20 */
 21
 22#include <linux/time.h>
 23#include <linux/fs.h>
 24#include <linux/iomap.h>
 25#include <linux/mount.h>
 26#include <linux/path.h>
 27#include <linux/dax.h>
 28#include <linux/quotaops.h>
 29#include <linux/pagevec.h>
 30#include <linux/uio.h>
 31#include <linux/mman.h>
 32#include <linux/backing-dev.h>
 33#include "ext4.h"
 34#include "ext4_jbd2.h"
 35#include "xattr.h"
 36#include "acl.h"
 37#include "truncate.h"
 38
 39static bool ext4_dio_supported(struct inode *inode)
 40{
 41	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
 42		return false;
 43	if (fsverity_active(inode))
 44		return false;
 45	if (ext4_should_journal_data(inode))
 46		return false;
 47	if (ext4_has_inline_data(inode))
 48		return false;
 49	return true;
 50}
 51
 52static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
 53{
 54	ssize_t ret;
 55	struct inode *inode = file_inode(iocb->ki_filp);
 56
 57	if (iocb->ki_flags & IOCB_NOWAIT) {
 58		if (!inode_trylock_shared(inode))
 59			return -EAGAIN;
 60	} else {
 61		inode_lock_shared(inode);
 62	}
 63
 64	if (!ext4_dio_supported(inode)) {
 65		inode_unlock_shared(inode);
 66		/*
 67		 * Fall back to buffered I/O if the operation being performed on
 68		 * the inode is not supported by direct I/O. The IOCB_DIRECT
 69		 * flag needs to be cleared here in order to ensure that the
 70		 * direct I/O path within generic_file_read_iter() is not
 71		 * taken.
 72		 */
 73		iocb->ki_flags &= ~IOCB_DIRECT;
 74		return generic_file_read_iter(iocb, to);
 75	}
 76
 77	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0);
 78	inode_unlock_shared(inode);
 79
 80	file_accessed(iocb->ki_filp);
 81	return ret;
 82}
 83
 84#ifdef CONFIG_FS_DAX
 85static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 86{
 87	struct inode *inode = file_inode(iocb->ki_filp);
 88	ssize_t ret;
 89
 90	if (iocb->ki_flags & IOCB_NOWAIT) {
 91		if (!inode_trylock_shared(inode))
 92			return -EAGAIN;
 93	} else {
 94		inode_lock_shared(inode);
 95	}
 96	/*
 97	 * Recheck under inode lock - at this point we are sure it cannot
 98	 * change anymore
 99	 */
100	if (!IS_DAX(inode)) {
101		inode_unlock_shared(inode);
102		/* Fall back to buffered IO in case we cannot support DAX */
103		return generic_file_read_iter(iocb, to);
104	}
105	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
106	inode_unlock_shared(inode);
107
108	file_accessed(iocb->ki_filp);
109	return ret;
110}
111#endif
112
113static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
114{
115	struct inode *inode = file_inode(iocb->ki_filp);
116
117	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
118		return -EIO;
119
120	if (!iov_iter_count(to))
121		return 0; /* skip atime */
122
123#ifdef CONFIG_FS_DAX
124	if (IS_DAX(inode))
125		return ext4_dax_read_iter(iocb, to);
126#endif
127	if (iocb->ki_flags & IOCB_DIRECT)
128		return ext4_dio_read_iter(iocb, to);
129
130	return generic_file_read_iter(iocb, to);
131}
132
133/*
134 * Called when an inode is released. Note that this is different
135 * from ext4_file_open: open gets called at every open, but release
136 * gets called only when /all/ the files are closed.
137 */
138static int ext4_release_file(struct inode *inode, struct file *filp)
139{
140	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
141		ext4_alloc_da_blocks(inode);
142		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
143	}
144	/* if we are the last writer on the inode, drop the block reservation */
145	if ((filp->f_mode & FMODE_WRITE) &&
146			(atomic_read(&inode->i_writecount) == 1) &&
147			!EXT4_I(inode)->i_reserved_data_blocks) {
148		down_write(&EXT4_I(inode)->i_data_sem);
149		ext4_discard_preallocations(inode, 0);
150		up_write(&EXT4_I(inode)->i_data_sem);
151	}
152	if (is_dx(inode) && filp->private_data)
153		ext4_htree_free_dir_info(filp->private_data);
154
155	return 0;
156}
157
158/*
159 * This tests whether the IO in question is block-aligned or not.
160 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
161 * are converted to written only after the IO is complete.  Until they are
162 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
163 * it needs to zero out portions of the start and/or end block.  If 2 AIO
164 * threads are at work on the same unwritten block, they must be synchronized
165 * or one thread will zero the other's data, causing corruption.
166 */
167static bool
168ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
169{
170	struct super_block *sb = inode->i_sb;
171	unsigned long blockmask = sb->s_blocksize - 1;
172
173	if ((pos | iov_iter_alignment(from)) & blockmask)
174		return true;
175
176	return false;
177}
178
179static bool
180ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
181{
182	if (offset + len > i_size_read(inode) ||
183	    offset + len > EXT4_I(inode)->i_disksize)
184		return true;
185	return false;
186}
187
188/* Is IO overwriting allocated and initialized blocks? */
189static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
190{
191	struct ext4_map_blocks map;
192	unsigned int blkbits = inode->i_blkbits;
193	int err, blklen;
194
195	if (pos + len > i_size_read(inode))
196		return false;
197
198	map.m_lblk = pos >> blkbits;
199	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
200	blklen = map.m_len;
201
202	err = ext4_map_blocks(NULL, inode, &map, 0);
203	/*
204	 * 'err == blklen' means that all of the blocks have been preallocated,
205	 * regardless of whether they have been initialized or not. To exclude
206	 * unwritten extents, we need to check m_flags.
207	 */
208	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
209}
210
211static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
212					 struct iov_iter *from)
213{
214	struct inode *inode = file_inode(iocb->ki_filp);
215	ssize_t ret;
216
217	if (unlikely(IS_IMMUTABLE(inode)))
218		return -EPERM;
219
220	ret = generic_write_checks(iocb, from);
221	if (ret <= 0)
222		return ret;
223
224	/*
225	 * If we have encountered a bitmap-format file, the size limit
226	 * is smaller than s_maxbytes, which is for extent-mapped files.
227	 */
228	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
229		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
230
231		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
232			return -EFBIG;
233		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
234	}
235
236	return iov_iter_count(from);
237}
238
239static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
240{
241	ssize_t ret, count;
242
243	count = ext4_generic_write_checks(iocb, from);
244	if (count <= 0)
245		return count;
246
247	ret = file_modified(iocb->ki_filp);
248	if (ret)
249		return ret;
250	return count;
251}
252
253static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
254					struct iov_iter *from)
255{
256	ssize_t ret;
257	struct inode *inode = file_inode(iocb->ki_filp);
258
259	if (iocb->ki_flags & IOCB_NOWAIT)
260		return -EOPNOTSUPP;
261
262	ext4_fc_start_update(inode);
263	inode_lock(inode);
264	ret = ext4_write_checks(iocb, from);
265	if (ret <= 0)
266		goto out;
267
268	current->backing_dev_info = inode_to_bdi(inode);
269	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
270	current->backing_dev_info = NULL;
271
272out:
273	inode_unlock(inode);
274	ext4_fc_stop_update(inode);
275	if (likely(ret > 0)) {
276		iocb->ki_pos += ret;
277		ret = generic_write_sync(iocb, ret);
278	}
279
280	return ret;
281}
282
283static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
284					   ssize_t written, size_t count)
285{
286	handle_t *handle;
287	bool truncate = false;
288	u8 blkbits = inode->i_blkbits;
289	ext4_lblk_t written_blk, end_blk;
290	int ret;
291
292	/*
293	 * Note that EXT4_I(inode)->i_disksize can get extended up to
294	 * inode->i_size while the I/O was running due to writeback of delalloc
295	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
296	 * zeroed/unwritten extents if this is possible; thus we won't leave
297	 * uninitialized blocks in a file even if we didn't succeed in writing
298	 * as much as we intended.
299	 */
300	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
301	if (offset + count <= EXT4_I(inode)->i_disksize) {
302		/*
303		 * We need to ensure that the inode is removed from the orphan
304		 * list if it has been added prematurely, due to writeback of
305		 * delalloc blocks.
306		 */
307		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
308			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
309
310			if (IS_ERR(handle)) {
311				ext4_orphan_del(NULL, inode);
312				return PTR_ERR(handle);
313			}
314
315			ext4_orphan_del(handle, inode);
316			ext4_journal_stop(handle);
317		}
318
319		return written;
320	}
321
322	if (written < 0)
323		goto truncate;
324
325	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
326	if (IS_ERR(handle)) {
327		written = PTR_ERR(handle);
328		goto truncate;
329	}
330
331	if (ext4_update_inode_size(inode, offset + written)) {
332		ret = ext4_mark_inode_dirty(handle, inode);
333		if (unlikely(ret)) {
334			written = ret;
335			ext4_journal_stop(handle);
336			goto truncate;
337		}
338	}
339
340	/*
341	 * We may need to truncate allocated but not written blocks beyond EOF.
342	 */
343	written_blk = ALIGN(offset + written, 1 << blkbits);
344	end_blk = ALIGN(offset + count, 1 << blkbits);
345	if (written_blk < end_blk && ext4_can_truncate(inode))
346		truncate = true;
347
348	/*
349	 * Remove the inode from the orphan list if it has been extended and
350	 * everything went OK.
351	 */
352	if (!truncate && inode->i_nlink)
353		ext4_orphan_del(handle, inode);
354	ext4_journal_stop(handle);
355
356	if (truncate) {
357truncate:
358		ext4_truncate_failed_write(inode);
359		/*
360		 * If the truncate operation failed early, then the inode may
361		 * still be on the orphan list. In that case, we need to try to
362		 * remove the inode from the in-memory linked list.
363		 */
364		if (inode->i_nlink)
365			ext4_orphan_del(NULL, inode);
366	}
367
368	return written;
369}
370
371static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
372				 int error, unsigned int flags)
373{
374	loff_t pos = iocb->ki_pos;
375	struct inode *inode = file_inode(iocb->ki_filp);
376
377	if (error)
378		return error;
379
380	if (size && flags & IOMAP_DIO_UNWRITTEN) {
381		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
382		if (error < 0)
383			return error;
384	}
385	/*
386	 * If we are extending the file, we have to update i_size here before
387	 * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
388	 * buffered reads could zero out too much from page cache pages. Update
389	 * of on-disk size will happen later in ext4_dio_write_iter() where
390	 * we have enough information to also perform orphan list handling etc.
391	 * Note that we perform all extending writes synchronously under
392	 * i_rwsem held exclusively so i_size update is safe here in that case.
393	 * If the write was not extending, we cannot see pos > i_size here
394	 * because operations reducing i_size like truncate wait for all
395	 * outstanding DIO before updating i_size.
396	 */
397	pos += size;
398	if (pos > i_size_read(inode))
399		i_size_write(inode, pos);
400
401	return 0;
402}
403
404static const struct iomap_dio_ops ext4_dio_write_ops = {
405	.end_io = ext4_dio_write_end_io,
406};
407
408/*
409 * The intention here is to start with shared lock acquired then see if any
410 * condition requires an exclusive inode lock. If yes, then we restart the
411 * whole operation by releasing the shared lock and acquiring exclusive lock.
412 *
413 * - For unaligned_io we never take shared lock as it may cause data corruption
414 *   when two unaligned IOs try to modify the same block, e.g. while zeroing.
415 *
416 * - For the extending-write case we don't take the shared lock, since it requires
417 *   updating inode i_disksize and/or orphan handling with exclusive lock.
418 *
419 * - Shared locking mostly applies to overwrites. Otherwise we switch to
420 *   the exclusive i_rwsem lock.
421 */
422static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
423				     bool *ilock_shared, bool *extend)
424{
425	struct file *file = iocb->ki_filp;
426	struct inode *inode = file_inode(file);
427	loff_t offset;
428	size_t count;
429	ssize_t ret;
430
431restart:
432	ret = ext4_generic_write_checks(iocb, from);
433	if (ret <= 0)
434		goto out;
435
436	offset = iocb->ki_pos;
437	count = ret;
438	if (ext4_extending_io(inode, offset, count))
439		*extend = true;
440	/*
441	 * Determine whether the IO operation will overwrite allocated
442	 * and initialized blocks.
443	 * We need exclusive i_rwsem for changing security info
444	 * in file_modified().
445	 */
446	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
447	     !ext4_overwrite_io(inode, offset, count))) {
448		if (iocb->ki_flags & IOCB_NOWAIT) {
449			ret = -EAGAIN;
450			goto out;
451		}
452		inode_unlock_shared(inode);
453		*ilock_shared = false;
454		inode_lock(inode);
455		goto restart;
456	}
457
458	ret = file_modified(file);
459	if (ret < 0)
460		goto out;
461
462	return count;
463out:
464	if (*ilock_shared)
465		inode_unlock_shared(inode);
466	else
467		inode_unlock(inode);
468	return ret;
469}
470
471static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
472{
473	ssize_t ret;
474	handle_t *handle;
475	struct inode *inode = file_inode(iocb->ki_filp);
476	loff_t offset = iocb->ki_pos;
477	size_t count = iov_iter_count(from);
478	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
479	bool extend = false, unaligned_io = false;
480	bool ilock_shared = true;
481
482	/*
483	 * We initially start with shared inode lock unless it is
484	 * unaligned IO, which needs the exclusive lock anyway.
485	 */
486	if (ext4_unaligned_io(inode, from, offset)) {
487		unaligned_io = true;
488		ilock_shared = false;
489	}
490	/*
491	 * Quick check here without any i_rwsem lock to see if it is extending
492	 * IO. A more reliable check is done in ext4_dio_write_checks() with
493	 * proper locking in place.
494	 */
495	if (offset + count > i_size_read(inode))
496		ilock_shared = false;
497
498	if (iocb->ki_flags & IOCB_NOWAIT) {
499		if (ilock_shared) {
500			if (!inode_trylock_shared(inode))
501				return -EAGAIN;
502		} else {
503			if (!inode_trylock(inode))
504				return -EAGAIN;
505		}
506	} else {
507		if (ilock_shared)
508			inode_lock_shared(inode);
509		else
510			inode_lock(inode);
511	}
512
513	/* Fall back to buffered I/O if the inode does not support direct I/O. */
514	if (!ext4_dio_supported(inode)) {
515		if (ilock_shared)
516			inode_unlock_shared(inode);
517		else
518			inode_unlock(inode);
519		return ext4_buffered_write_iter(iocb, from);
520	}
521
522	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
523	if (ret <= 0)
524		return ret;
525
526	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
527	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
528		ret = -EAGAIN;
529		goto out;
530	}
531
532	offset = iocb->ki_pos;
533	count = ret;
534
535	/*
536	 * Unaligned direct IOs must be serialized with each other, as zeroing
537	 * of partial blocks of two competing unaligned IOs can result in data
538	 * corruption.
539	 *
540	 * So we make sure we don't allow any unaligned IO in flight.
541	 * For IOs where we need not wait (like unaligned non-AIO DIO),
542	 * the inode_dio_wait() below may anyway become a no-op, since we
543	 * start with an exclusive lock.
544	 */
545	if (unaligned_io)
546		inode_dio_wait(inode);
547
548	if (extend) {
549		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
550		if (IS_ERR(handle)) {
551			ret = PTR_ERR(handle);
552			goto out;
553		}
554
555		ext4_fc_start_update(inode);
556		ret = ext4_orphan_add(handle, inode);
557		ext4_fc_stop_update(inode);
558		if (ret) {
559			ext4_journal_stop(handle);
560			goto out;
561		}
562
563		ext4_journal_stop(handle);
564	}
565
566	if (ilock_shared)
567		iomap_ops = &ext4_iomap_overwrite_ops;
568	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
569			   (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0);
570	if (ret == -ENOTBLK)
571		ret = 0;
572
573	if (extend)
574		ret = ext4_handle_inode_extension(inode, offset, ret, count);
575
576out:
577	if (ilock_shared)
578		inode_unlock_shared(inode);
579	else
580		inode_unlock(inode);
581
582	if (ret >= 0 && iov_iter_count(from)) {
583		ssize_t err;
584		loff_t endbyte;
585
586		offset = iocb->ki_pos;
587		err = ext4_buffered_write_iter(iocb, from);
588		if (err < 0)
589			return err;
590
591		/*
592		 * We need to ensure that the pages within the page cache for
593		 * the range covered by this I/O are written to disk and
594		 * invalidated. This is an attempt to preserve the expected
595		 * direct I/O semantics in the case we fall back to buffered I/O
596		 * to complete the I/O request.
597		 */
598		ret += err;
599		endbyte = offset + err - 1;
600		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
601						   offset, endbyte);
602		if (!err)
603			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
604						 offset >> PAGE_SHIFT,
605						 endbyte >> PAGE_SHIFT);
606	}
607
608	return ret;
609}
610
611#ifdef CONFIG_FS_DAX
612static ssize_t
613ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
614{
615	ssize_t ret;
616	size_t count;
617	loff_t offset;
618	handle_t *handle;
619	bool extend = false;
620	struct inode *inode = file_inode(iocb->ki_filp);
621
622	if (iocb->ki_flags & IOCB_NOWAIT) {
623		if (!inode_trylock(inode))
624			return -EAGAIN;
625	} else {
626		inode_lock(inode);
627	}
628
629	ret = ext4_write_checks(iocb, from);
630	if (ret <= 0)
631		goto out;
632
633	offset = iocb->ki_pos;
634	count = iov_iter_count(from);
635
636	if (offset + count > EXT4_I(inode)->i_disksize) {
637		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
638		if (IS_ERR(handle)) {
639			ret = PTR_ERR(handle);
640			goto out;
641		}
642
643		ret = ext4_orphan_add(handle, inode);
644		if (ret) {
645			ext4_journal_stop(handle);
646			goto out;
647		}
648
649		extend = true;
650		ext4_journal_stop(handle);
651	}
652
653	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
654
655	if (extend)
656		ret = ext4_handle_inode_extension(inode, offset, ret, count);
 
 
657out:
658	inode_unlock(inode);
659	if (ret > 0)
660		ret = generic_write_sync(iocb, ret);
661	return ret;
662}
663#endif
664
665static ssize_t
666ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
667{
668	struct inode *inode = file_inode(iocb->ki_filp);
669
670	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
671		return -EIO;
672
673#ifdef CONFIG_FS_DAX
674	if (IS_DAX(inode))
675		return ext4_dax_write_iter(iocb, from);
676#endif
677	if (iocb->ki_flags & IOCB_DIRECT)
678		return ext4_dio_write_iter(iocb, from);
679	else
680		return ext4_buffered_write_iter(iocb, from);
681}
682
683#ifdef CONFIG_FS_DAX
684static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
685		enum page_entry_size pe_size)
686{
687	int error = 0;
688	vm_fault_t result;
689	int retries = 0;
690	handle_t *handle = NULL;
691	struct inode *inode = file_inode(vmf->vma->vm_file);
692	struct super_block *sb = inode->i_sb;
693
694	/*
695	 * We have to distinguish real writes from writes which will result in a
696	 * COW page; COW writes should *not* poke the journal (the file will not
697	 * be changed). Doing so would cause unintended failures when mounted
698	 * read-only.
699	 *
700	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
701	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
702	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
703	 * we eventually come back with a COW page.
704	 */
705	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
706		(vmf->vma->vm_flags & VM_SHARED);
707	pfn_t pfn;
708
709	if (write) {
710		sb_start_pagefault(sb);
711		file_update_time(vmf->vma->vm_file);
712		down_read(&EXT4_I(inode)->i_mmap_sem);
713retry:
714		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
715					       EXT4_DATA_TRANS_BLOCKS(sb));
716		if (IS_ERR(handle)) {
717			up_read(&EXT4_I(inode)->i_mmap_sem);
718			sb_end_pagefault(sb);
719			return VM_FAULT_SIGBUS;
720		}
721	} else {
722		down_read(&EXT4_I(inode)->i_mmap_sem);
723	}
724	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
725	if (write) {
726		ext4_journal_stop(handle);
727
728		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
729		    ext4_should_retry_alloc(sb, &retries))
730			goto retry;
731		/* Handling synchronous page fault? */
732		if (result & VM_FAULT_NEEDDSYNC)
733			result = dax_finish_sync_fault(vmf, pe_size, pfn);
734		up_read(&EXT4_I(inode)->i_mmap_sem);
735		sb_end_pagefault(sb);
736	} else {
737		up_read(&EXT4_I(inode)->i_mmap_sem);
738	}
739
740	return result;
741}
742
743static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
744{
745	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
746}
747
748static const struct vm_operations_struct ext4_dax_vm_ops = {
749	.fault		= ext4_dax_fault,
750	.huge_fault	= ext4_dax_huge_fault,
751	.page_mkwrite	= ext4_dax_fault,
752	.pfn_mkwrite	= ext4_dax_fault,
753};
754#else
755#define ext4_dax_vm_ops	ext4_file_vm_ops
756#endif
757
758static const struct vm_operations_struct ext4_file_vm_ops = {
759	.fault		= ext4_filemap_fault,
760	.map_pages	= filemap_map_pages,
761	.page_mkwrite   = ext4_page_mkwrite,
762};
763
764static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
765{
766	struct inode *inode = file->f_mapping->host;
767	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
768	struct dax_device *dax_dev = sbi->s_daxdev;
769
770	if (unlikely(ext4_forced_shutdown(sbi)))
771		return -EIO;
772
773	/*
774	 * We don't support synchronous mappings for non-DAX files and
775	 * for DAX files whose underlying dax_device is not synchronous.
776	 */
777	if (!daxdev_mapping_supported(vma, dax_dev))
778		return -EOPNOTSUPP;
779
780	file_accessed(file);
781	if (IS_DAX(file_inode(file))) {
782		vma->vm_ops = &ext4_dax_vm_ops;
783		vma->vm_flags |= VM_HUGEPAGE;
784	} else {
785		vma->vm_ops = &ext4_file_vm_ops;
786	}
787	return 0;
788}
789
790static int ext4_sample_last_mounted(struct super_block *sb,
791				    struct vfsmount *mnt)
792{
793	struct ext4_sb_info *sbi = EXT4_SB(sb);
794	struct path path;
795	char buf[64], *cp;
796	handle_t *handle;
797	int err;
798
799	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
800		return 0;
801
802	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
803		return 0;
804
805	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
806	/*
807	 * Sample where the filesystem has been mounted and
808	 * store it in the superblock for sysadmin convenience
809	 * when trying to sort through large numbers of block
810	 * devices or filesystem images.
811	 */
812	memset(buf, 0, sizeof(buf));
813	path.mnt = mnt;
814	path.dentry = mnt->mnt_root;
815	cp = d_path(&path, buf, sizeof(buf));
816	err = 0;
817	if (IS_ERR(cp))
818		goto out;
819
820	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
821	err = PTR_ERR(handle);
822	if (IS_ERR(handle))
823		goto out;
824	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
825	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
826	if (err)
827		goto out_journal;
828	lock_buffer(sbi->s_sbh);
829	strncpy(sbi->s_es->s_last_mounted, cp,
830		sizeof(sbi->s_es->s_last_mounted));
831	ext4_superblock_csum_set(sb);
832	unlock_buffer(sbi->s_sbh);
833	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
834out_journal:
835	ext4_journal_stop(handle);
836out:
837	sb_end_intwrite(sb);
838	return err;
839}
840
841static int ext4_file_open(struct inode *inode, struct file *filp)
842{
843	int ret;
844
845	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
846		return -EIO;
847
848	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
849	if (ret)
850		return ret;
851
852	ret = fscrypt_file_open(inode, filp);
853	if (ret)
854		return ret;
855
856	ret = fsverity_file_open(inode, filp);
857	if (ret)
858		return ret;
859
860	/*
861	 * Set up the jbd2_inode if we are opening the inode for
862	 * writing and the journal is present
863	 */
864	if (filp->f_mode & FMODE_WRITE) {
865		ret = ext4_inode_attach_jinode(inode);
866		if (ret < 0)
867			return ret;
868	}
869
870	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
871	return dquot_file_open(inode, filp);
872}
873
874/*
875 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
876 * by calling generic_file_llseek_size() with the appropriate maxbytes
877 * value for each.
878 */
879loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
880{
881	struct inode *inode = file->f_mapping->host;
882	loff_t maxbytes;
883
884	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
885		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
886	else
887		maxbytes = inode->i_sb->s_maxbytes;
888
889	switch (whence) {
890	default:
891		return generic_file_llseek_size(file, offset, whence,
892						maxbytes, i_size_read(inode));
893	case SEEK_HOLE:
894		inode_lock_shared(inode);
895		offset = iomap_seek_hole(inode, offset,
896					 &ext4_iomap_report_ops);
897		inode_unlock_shared(inode);
898		break;
899	case SEEK_DATA:
900		inode_lock_shared(inode);
901		offset = iomap_seek_data(inode, offset,
902					 &ext4_iomap_report_ops);
903		inode_unlock_shared(inode);
904		break;
905	}
906
907	if (offset < 0)
908		return offset;
909	return vfs_setpos(file, offset, maxbytes);
910}
911
912const struct file_operations ext4_file_operations = {
913	.llseek		= ext4_llseek,
914	.read_iter	= ext4_file_read_iter,
915	.write_iter	= ext4_file_write_iter,
916	.iopoll		= iomap_dio_iopoll,
917	.unlocked_ioctl = ext4_ioctl,
918#ifdef CONFIG_COMPAT
919	.compat_ioctl	= ext4_compat_ioctl,
920#endif
921	.mmap		= ext4_file_mmap,
922	.mmap_supported_flags = MAP_SYNC,
923	.open		= ext4_file_open,
924	.release	= ext4_release_file,
925	.fsync		= ext4_sync_file,
926	.get_unmapped_area = thp_get_unmapped_area,
927	.splice_read	= generic_file_splice_read,
928	.splice_write	= iter_file_splice_write,
929	.fallocate	= ext4_fallocate,
930};
931
932const struct inode_operations ext4_file_inode_operations = {
933	.setattr	= ext4_setattr,
934	.getattr	= ext4_file_getattr,
935	.listxattr	= ext4_listxattr,
936	.get_acl	= ext4_get_acl,
937	.set_acl	= ext4_set_acl,
938	.fiemap		= ext4_fiemap,
939	.fileattr_get	= ext4_fileattr_get,
940	.fileattr_set	= ext4_fileattr_set,
941};
942