fs/udf/file.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * file.c
 *
 * PURPOSE
 *  File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  (C) 1998-1999 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb  Attempt to integrate into udf.o
 *  10/07/98      Switched to using generic_readpage, etc., like isofs
 *                And it works!
 *  12/06/98 blf  Added udf_file_read. uses generic_file_read for all cases but
 *                ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99      64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99      Preliminary file write support
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include "udf_i.h"
#include "udf_sb.h"

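/*
 * Write-fault handler for mmap'ed UDF files: under the mapping's shared
 * invalidate lock it revalidates the folio against i_size, allocates blocks
 * through __block_write_begin()/udf_get_block() unless the file data lives
 * inside the ICB, and finally dirties the folio and waits for it to become
 * write-stable.
 */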
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned int end;
	vm_fault_t ret = VM_FAULT_LOCKED;
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	filemap_invalidate_lock_shared(mapping);
	folio_lock(folio);
	size = i_size_read(inode);
	if (folio->mapping != inode->i_mapping || folio_pos(folio) >= size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	/* Space is already allocated for in-ICB file */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		goto out_dirty;
	if (folio->index == size >> PAGE_SHIFT)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(folio, 0, end, udf_get_block);
	if (err) {
		folio_unlock(folio);
		ret = vmf_fs_error(err);
		goto out_unlock;
	}

	block_commit_write(&folio->page, 0, end);
out_dirty:
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out_unlock:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct udf_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= udf_page_mkwrite,
};

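/*
 * Buffered write entry point. For files whose data is stored directly in the
 * ICB, the inode is first converted to normal extent-based allocation
 * (udf_expand_file_adinicb()) once a write would no longer fit in the ICB;
 * otherwise the write is handed to __generic_file_write_iter(). For in-ICB
 * files i_lenAlloc is kept in sync with i_size after a successful write.
 */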
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
				 iocb->ki_pos + iov_iter_count(from))) {
		filemap_invalidate_lock(inode->i_mapping);
		retval = udf_expand_file_adinicb(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		if (retval)
			goto out;
	}

	retval = __generic_file_write_iter(iocb, from);
out:
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
		down_write(&iinfo->i_data_sem);
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}

long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	long old_block, new_block;
	int result;

	if (file_permission(filp, MAY_READ) != 0) {
		udf_debug("no permission to access inode %lu\n", inode->i_ino);
		return -EPERM;
	}

	if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
		     (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
		udf_debug("invalid argument to udf_ioctl\n");
		return -EINVAL;
	}

	switch (cmd) {
	case UDF_GETVOLIDENT:
		if (copy_to_user((char __user *)arg,
				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
			return -EFAULT;
		return 0;
	case UDF_RELOCATE_BLOCKS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(old_block, (long __user *)arg))
			return -EFAULT;
		result = udf_relocate_blocks(inode->i_sb,
						old_block, &new_block);
		if (result == 0)
			result = put_user(new_block, (long __user *)arg);
		return result;
	case UDF_GETEASIZE:
		return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
	case UDF_GETEABLOCK:
		return copy_to_user((char __user *)arg,
				    UDF_I(inode)->i_data,
				    UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
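/*
 * Illustrative userspace sketch (not part of this file): querying the volume
 * identifier through the UDF_GETVOLIDENT ioctl handled above. It assumes the
 * ioctl definitions exported in <linux/udf_fs_i.h>; the 32-byte buffer size
 * mirrors the copy_to_user() length used by udf_ioctl().
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/udf_fs_i.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char volid[32];
 *		int fd;
 *
 *		if (argc < 2)
 *			return 1;
 *		fd = open(argv[1], O_RDONLY);	// any file on the UDF mount
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, UDF_GETVOLIDENT, volid) == 0)
 *			printf("volume ident: %.32s\n", volid);
 *		close(fd);
 *		return 0;
 *	}
 */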

static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE &&
	    atomic_read(&inode->i_writecount) == 1) {
		/*
		 * Grab i_mutex to avoid races with writes changing i_size
		 * while we are running.
		 */
		inode_lock(inode);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		inode_unlock(inode);
	}
	return 0;
}

static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &udf_file_vm_ops;

	return 0;
}

const struct file_operations udf_file_operations = {
	.read_iter		= generic_file_read_iter,
	.unlocked_ioctl		= udf_ioctl,
	.open			= generic_file_open,
	.mmap			= udf_file_mmap,
	.write_iter		= udf_file_write_iter,
	.release		= udf_release_file,
	.fsync			= generic_file_fsync,
	.splice_read		= filemap_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};
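/*
 * ->setattr: rejects chown()/chgrp() that would diverge from the uid=/gid=
 * mount options, truncates via udf_setsize() under the invalidate lock, and
 * propagates mode changes into the UDF extended permission bits.
 */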

static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		       struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_UID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
	    !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
		return -EPERM;
	if ((attr->ia_valid & ATTR_GID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
	    !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		filemap_invalidate_lock(inode->i_mapping);
		error = udf_setsize(inode, attr->ia_size);
		filemap_invalidate_unlock(inode->i_mapping);
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_MODE)
		udf_update_extra_perms(inode, attr->ia_mode);

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations udf_file_inode_operations = {
	.setattr		= udf_setattr,
};

fs/udf/file.c (Linux v6.2)
 
/*
 * file.c
 *
 * PURPOSE
 *  File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-1999 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb  Attempt to integrate into udf.o
 *  10/07/98      Switched to using generic_readpage, etc., like isofs
 *                And it works!
 *  12/06/98 blf  Added udf_file_read. uses generic_file_read for all cases but
 *                ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99      64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99      Preliminary file write support
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include "udf_i.h"
#include "udf_sb.h"

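/*
 * For in-ICB ("allocated in ICB") files the data lives in the inode's i_data
 * area right after the extended attributes; this helper copies it into the
 * page cache page and zero-fills the rest of the page.
 */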
static void __udf_adinicb_readpage(struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = i_size_read(inode);

	/*
	 * We have to be careful here as truncate can change i_size under us.
	 * So just sample it once and use the same value everywhere.
	 */
	kaddr = kmap_atomic(page);
	memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize);
	memset(kaddr + isize, 0, PAGE_SIZE - isize);
	flush_dcache_page(page);
	SetPageUptodate(page);
	kunmap_atomic(kaddr);
}

static int udf_adinicb_read_folio(struct file *file, struct folio *folio)
{
	BUG_ON(!folio_test_locked(folio));
	__udf_adinicb_readpage(&folio->page);
	folio_unlock(folio);

	return 0;
}

static int udf_adinicb_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);

	BUG_ON(!PageLocked(page));

	kaddr = kmap_atomic(page);
	memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, i_size_read(inode));
	SetPageUptodate(page);
	kunmap_atomic(kaddr);
	mark_inode_dirty(inode);
	unlock_page(page);

	return 0;
}

static int udf_adinicb_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, struct page **pagep,
			void **fsdata)
{
	struct page *page;

	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
		return -EIO;
	page = grab_cache_page_write_begin(mapping, 0);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (!PageUptodate(page))
		__udf_adinicb_readpage(page);
	return 0;
}

static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	/* Fallback to buffered I/O. */
	return 0;
}

static int udf_adinicb_write_end(struct file *file, struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned copied,
				 struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);
	return copied;
}
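
/*
 * Address space operations used while the file data is embedded in the ICB:
 * reads and writes go through the single page at index 0, and direct I/O
 * simply falls back to buffered I/O by returning 0.
 */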
const struct address_space_operations udf_adinicb_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= udf_adinicb_read_folio,
	.writepage	= udf_adinicb_writepage,
	.write_begin	= udf_adinicb_write_begin,
	.write_end	= udf_adinicb_write_end,
	.direct_IO	= udf_adinicb_direct_IO,
};
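
/*
 * In this version the in-ICB bookkeeping happens up front under i_data_sem:
 * either the inode is expanded out of the ICB when the write would overflow
 * it, or i_lenAlloc is extended to cover the new end of file before the
 * generic write path runs.
 */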
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	down_write(&iinfo->i_data_sem);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loff_t end = iocb->ki_pos + iov_iter_count(from);

		if (inode->i_sb->s_blocksize <
				(udf_file_entry_alloc_offset(inode) + end)) {
			err = udf_expand_file_adinicb(inode);
			if (err) {
				inode_unlock(inode);
				udf_debug("udf_expand_adinicb: err=%d\n", err);
				return err;
			}
		} else {
			iinfo->i_lenAlloc = max(end, inode->i_size);
			up_write(&iinfo->i_data_sem);
		}
	} else
		up_write(&iinfo->i_data_sem);

	retval = __generic_file_write_iter(iocb, from);
out:
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}

long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	long old_block, new_block;
	int result;

	if (file_permission(filp, MAY_READ) != 0) {
		udf_debug("no permission to access inode %lu\n", inode->i_ino);
		return -EPERM;
	}

	if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
		     (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
		udf_debug("invalid argument to udf_ioctl\n");
		return -EINVAL;
	}

	switch (cmd) {
	case UDF_GETVOLIDENT:
		if (copy_to_user((char __user *)arg,
				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
			return -EFAULT;
		return 0;
	case UDF_RELOCATE_BLOCKS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(old_block, (long __user *)arg))
			return -EFAULT;
		result = udf_relocate_blocks(inode->i_sb,
						old_block, &new_block);
		if (result == 0)
			result = put_user(new_block, (long __user *)arg);
		return result;
	case UDF_GETEASIZE:
		return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
	case UDF_GETEABLOCK:
		return copy_to_user((char __user *)arg,
				    UDF_I(inode)->i_data,
				    UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE &&
	    atomic_read(&inode->i_writecount) == 1) {
		/*
		 * Grab i_mutex to avoid races with writes changing i_size
		 * while we are running.
		 */
		inode_lock(inode);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		inode_unlock(inode);
	}
	return 0;
}

const struct file_operations udf_file_operations = {
	.read_iter		= generic_file_read_iter,
	.unlocked_ioctl		= udf_ioctl,
	.open			= generic_file_open,
	.mmap			= generic_file_mmap,
	.write_iter		= udf_file_write_iter,
	.release		= udf_release_file,
	.fsync			= generic_file_fsync,
	.splice_read		= generic_file_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};

static int udf_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		       struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int error;

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_UID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
	    !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
		return -EPERM;
	if ((attr->ia_valid & ATTR_GID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
	    !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = udf_setsize(inode, attr->ia_size);
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_MODE)
		udf_update_extra_perms(inode, attr->ia_mode);

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations udf_file_inode_operations = {
	.setattr		= udf_setattr,
};