// SPDX-License-Identifier: GPL-2.0-only
/*
 * file.c
 *
 * PURPOSE
 *	File handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	(C) 1998-1999 Dave Boynton
 *	(C) 1998-2004 Ben Fennema
 *	(C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/02/98 dgb	Attempt to integrate into udf.o
 *  10/07/98		Switched to using generic_readpage, etc., like isofs
 *			And it works!
 *  12/06/98 blf	Added udf_file_read. uses generic_file_read for all cases but
 *			ICBTAG_FLAG_AD_IN_ICB.
 *  04/06/99		64 bit file handling on 32 bit systems taken from ext2 file.c
 *  05/12/99		Preliminary file write support
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h>	/* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include "udf_i.h"
#include "udf_sb.h"

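/*
 * Write-protect fault handler for shared mmaps: make sure blocks backing
 * the faulting folio are allocated (unless the file data is embedded in
 * the ICB, where space is already reserved) and dirty the folio while
 * holding the invalidate lock shared.
 */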
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned int end;
	vm_fault_t ret = VM_FAULT_LOCKED;
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	filemap_invalidate_lock_shared(mapping);
	folio_lock(folio);
	size = i_size_read(inode);
	if (folio->mapping != inode->i_mapping || folio_pos(folio) >= size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	/* Space is already allocated for in-ICB file */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		goto out_dirty;
	if (folio->index == size >> PAGE_SHIFT)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;
	err = __block_write_begin(folio, 0, end, udf_get_block);
	if (err) {
		folio_unlock(folio);
		ret = vmf_fs_error(err);
		goto out_unlock;
	}

	block_commit_write(&folio->page, 0, end);
out_dirty:
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out_unlock:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct udf_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = udf_page_mkwrite,
};

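/*
 * Buffered write path.  A file whose data is embedded in the ICB is
 * expanded to a normal block-mapped file once a write would no longer fit
 * in the inode; for writes that stay in the ICB, i_lenAlloc is kept in
 * sync with i_size after the write.
 */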
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct udf_inode_info *iinfo = UDF_I(inode);

	inode_lock(inode);

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		goto out;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
					iocb->ki_pos + iov_iter_count(from))) {
		filemap_invalidate_lock(inode->i_mapping);
		retval = udf_expand_file_adinicb(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		if (retval)
			goto out;
	}

	retval = __generic_file_write_iter(iocb, from);
out:
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
		down_write(&iinfo->i_data_sem);
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	inode_unlock(inode);

	if (retval > 0) {
		mark_inode_dirty(inode);
		retval = generic_write_sync(iocb, retval);
	}

	return retval;
}

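/*
 * UDF-specific ioctls: report the volume identifier, expose the file's
 * extended attribute area, and let CAP_SYS_ADMIN relocate defective
 * blocks.
 */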
long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	long old_block, new_block;
	int result;

	if (file_permission(filp, MAY_READ) != 0) {
		udf_debug("no permission to access inode %lu\n", inode->i_ino);
		return -EPERM;
	}

	if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
		     (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
		udf_debug("invalid argument to udf_ioctl\n");
		return -EINVAL;
	}

	switch (cmd) {
	case UDF_GETVOLIDENT:
		if (copy_to_user((char __user *)arg,
				 UDF_SB(inode->i_sb)->s_volume_ident, 32))
			return -EFAULT;
		return 0;
	case UDF_RELOCATE_BLOCKS:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(old_block, (long __user *)arg))
			return -EFAULT;
		result = udf_relocate_blocks(inode->i_sb,
					     old_block, &new_block);
		if (result == 0)
			result = put_user(new_block, (long __user *)arg);
		return result;
	case UDF_GETEASIZE:
		return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
	case UDF_GETEABLOCK:
		return copy_to_user((char __user *)arg,
				    UDF_I(inode)->i_data,
				    UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

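/*
 * Called when a struct file is released.  If this drops the last writable
 * reference, discard preallocated extents and trim the tail extent back
 * to i_size.
 */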
static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE &&
	    atomic_read(&inode->i_writecount) == 1) {
		/*
		 * Grab the inode lock to avoid races with writes changing
		 * i_size while we are running.
		 */
		inode_lock(inode);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		inode_unlock(inode);
	}
	return 0;
}

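/* Install udf_file_vm_ops so that write faults go through udf_page_mkwrite(). */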
static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &udf_file_vm_ops;

	return 0;
}

const struct file_operations udf_file_operations = {
	.read_iter = generic_file_read_iter,
	.unlocked_ioctl = udf_ioctl,
	.open = generic_file_open,
	.mmap = udf_file_mmap,
	.write_iter = udf_file_write_iter,
	.release = udf_release_file,
	.fsync = generic_file_fsync,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = generic_file_llseek,
};

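/*
 * Attribute changes: refuse chown/chgrp when the uid=/gid= mount options
 * force ownership, resize via udf_setsize() under the invalidate lock,
 * and mirror mode changes into the UDF extended permission bits.
 */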
static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		       struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_UID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
	    !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
		return -EPERM;
	if ((attr->ia_valid & ATTR_GID) &&
	    UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
	    !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		filemap_invalidate_lock(inode->i_mapping);
		error = udf_setsize(inode, attr->ia_size);
		filemap_invalidate_unlock(inode->i_mapping);
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_MODE)
		udf_update_extra_perms(inode, attr->ia_mode);

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations udf_file_inode_operations = {
	.setattr = udf_setattr,
};