v3.1
 
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
		        !EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_aiodio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}

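The check above keys purely off the write's offset and size relative to the filesystem block size. As a userspace illustration, the following hedged sketch (not part of this file; it assumes libaio, a 4096-byte ext4 block size, 512-byte device sectors, and a hypothetical test path) issues one async O_DIRECT write that ext4_unaligned_aio() would flag:

/*
 * Hedged sketch (not from the kernel tree): one async O_DIRECT write at a
 * block-unaligned offset.  Build with: gcc unaligned.c -laio.  Assumes a
 * 4096-byte ext4 block size, a device with 512-byte logical sectors, and
 * the hypothetical path /mnt/ext4/testfile.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	fd = open("/mnt/ext4/testfile", O_RDWR | O_CREAT | O_DIRECT, 0644);
	if (fd < 0 || io_setup(1, &ctx) < 0)
		return 1;
	/* keep the write below i_size: pos >= i_size takes the aligned path */
	ftruncate(fd, 1 << 20);

	/* O_DIRECT requires sector-aligned memory; 4096 covers any sector size */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	/* offset 512 is sector-aligned (the DIO is accepted) but not block-
	 * aligned, so ext4_unaligned_aio() returns 1 and this IO is serialized */
	io_prep_pwrite(&cb, fd, buf, 4096, 512);
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;
	io_getevents(ctx, 1, 1, &ev, NULL);

	io_destroy(ctx);
	close(fd);
	return 0;
}
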
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int unaligned_aio = 0;
	int ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
		   !is_sync_kiocb(iocb))) {
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
	}

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		static unsigned long unaligned_warn_time;

		/* Warn about this once per day */
		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);
		mutex_lock(ext4_aio_mutex(inode));
		ext4_aiodio_wait(inode);
	}

	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			memcpy(sbi->s_es->s_last_mounted, cp,
			       sizeof(sbi->s_es->s_last_mounted));
			ext4_mark_super_dirty(sb);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

		spin_lock(&inode->i_lock);
		if (!ei->jinode) {
			if (!jinode) {
				spin_unlock(&inode->i_lock);
				return -ENOMEM;
			}
			ei->jinode = jinode;
			jbd2_journal_init_jbd_inode(ei->jinode, inode);
			jinode = NULL;
		}
		spin_unlock(&inode->i_lock);
		if (unlikely(jinode != NULL))
			jbd2_free_inode(jinode);
	}
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() copied from generic_file_llseek() to handle both
 * block-mapped and extent-mapped maxbytes values. This should
 * otherwise be identical with generic_file_llseek().
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;
	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		if (offset == 0) {
			mutex_unlock(&inode->i_mutex);
			return file->f_pos;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if (offset >= inode->i_size) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if (offset >= inode->i_size) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}
		offset = inode->i_size;
		break;
	}

	if (offset < 0 || offset > maxbytes) {
		mutex_unlock(&inode->i_mutex);
		return -EINVAL;
	}

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	mutex_unlock(&inode->i_mutex);

	return offset;
}

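Since this implementation treats the entire file as data with one virtual hole at i_size, the observable userspace behaviour is simple. A minimal hedged sketch (hypothetical path; _GNU_SOURCE is needed for the SEEK_DATA/SEEK_HOLE definitions):

/*
 * Hedged sketch (not from the kernel tree): probing a file with
 * SEEK_DATA/SEEK_HOLE.  On this implementation every offset below i_size is
 * data, the only hole is the virtual one at EOF, and offsets at or past EOF
 * yield ENXIO.  /mnt/ext4/testfile is a hypothetical path.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/testfile", O_RDONLY);
	if (fd < 0)
		return 1;

	off_t data = lseek(fd, 0, SEEK_DATA);	/* 0 for any non-empty file */
	off_t hole = lseek(fd, 0, SEEK_HOLE);	/* i_size, the virtual hole */
	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}
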
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
#ifdef CONFIG_EXT4_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.get_acl	= ext4_get_acl,
	.fiemap		= ext4_fiemap,
};

v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
		        !EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err==len' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

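ext4_overwrite_io() requires blocks that are both allocated and initialized (EXT4_MAP_MAPPED), so extents preallocated by fallocate() stay unwritten and do not count as overwrites. A hedged sketch of setting up a genuine overwrite from userspace (hypothetical path; the lockless DIO path also depends on the dioread_nolock mount option at this version):

/*
 * Hedged sketch (not from the kernel tree): turning a block into a true
 * overwrite target for direct IO.  The block is first written and synced so
 * it becomes EXT4_MAP_MAPPED; fallocate() alone would leave it unwritten.
 * /mnt/ext4/db.file is a hypothetical path.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/ext4/db.file", O_RDWR | O_CREAT | O_DIRECT, 0644);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	/* first write allocates and initializes the block... */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	fsync(fd);

	/* ...so this one is a pure overwrite: ext4_overwrite_io() returns true */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	close(fd);
	return 0;
}
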
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	/*
	 * Unaligned direct AIO must be the only IO in flight. Otherwise
	 * overlapping aligned IO after unaligned might result in data
	 * corruption.
	 */
	if (ret == -EIOCBQUEUED && unaligned_aio)
		ext4_unwritten_wait(inode);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

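The IOCB_NOWAIT / -EAGAIN branches above are reached from userspace through preadv2()/pwritev2() with RWF_NOWAIT, which ext4_file_open() below enables by setting FMODE_NOWAIT. A minimal hedged sketch (hypothetical path; needs glibc 2.26+ and a kernel with RWF_NOWAIT support):

/*
 * Hedged sketch (not from the kernel tree): a non-blocking direct write via
 * pwritev2(RWF_NOWAIT).  If ext4 would have to block (contended inode lock,
 * or direct IO into a range that is not a pure overwrite), the call fails
 * with EAGAIN instead of sleeping.  /mnt/ext4/testfile is hypothetical.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/ext4/testfile", O_WRONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
	if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
		fprintf(stderr, "would block; retry from a worker thread\n");
	close(fd);
	return 0;
}
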
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if underneath dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

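daxdev_mapping_supported() is what turns a MAP_SYNC request into -EOPNOTSUPP when the backing dax_device cannot honor it; the file_operations at the bottom of the file must also advertise .mmap_supported_flags = MAP_SYNC before the VFS passes the flag through. A hedged sketch of requesting such a mapping with a fallback (hypothetical persistent-memory path):

/*
 * Hedged sketch (not from the kernel tree): asking for a synchronous DAX
 * mapping.  If the backing device is not synchronous DAX, the mmap() fails
 * with EOPNOTSUPP and a plain MAP_SHARED mapping (where msync() provides
 * durability) is the fallback.  /mnt/pmem/log is a hypothetical path on a
 * DAX-capable mount.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03	/* from <linux/mman.h> on older glibc */
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000		/* from <linux/mman.h> on older glibc */
#endif

int main(void)
{
	int fd = open("/mnt/pmem/log", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED) {
		/* EOPNOTSUPP: no synchronous DAX here; fall back */
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
	}
	((char *)p)[0] = 1;	/* with MAP_SYNC, durable once flushed from CPU cache */
	munmap(p, 4096);
	close(fd);
	return 0;
}
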
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};