Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * File operations for Coda.
  4 * Original version: (C) 1996 Peter Braam 
  5 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
  6 *
  7 * Carnegie Mellon encourages users of this code to contribute improvements
  8 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
  9 */
 10
 11#include <linux/refcount.h>
 12#include <linux/types.h>
 13#include <linux/kernel.h>
 14#include <linux/time.h>
 15#include <linux/file.h>
 16#include <linux/fs.h>
 17#include <linux/pagemap.h>
 18#include <linux/stat.h>
 19#include <linux/cred.h>
 20#include <linux/errno.h>
 21#include <linux/spinlock.h>
 22#include <linux/string.h>
 23#include <linux/slab.h>
 24#include <linux/uaccess.h>
 25#include <linux/uio.h>
 26
 27#include <linux/coda.h>
 28#include "coda_psdev.h"
 29#include "coda_linux.h"
 30#include "coda_int.h"
 31
/*
 * Per-mapping wrapper around the container file's vm_operations so Coda
 * can intercept open/close of VMAs that map the container file.
 */
struct coda_vm_ops {
	refcount_t refcnt;	/* number of VMAs currently using this wrapper */
	struct file *coda_file;	/* pinned coda file; released on last vm_close */
	const struct vm_operations_struct *host_vm_ops;	/* container file's original ops */
	struct vm_operations_struct vm_ops;	/* copy of host ops with open/close redirected */
};
 38
 39static ssize_t
 40coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 41{
 42	struct file *coda_file = iocb->ki_filp;
 43	struct inode *coda_inode = file_inode(coda_file);
 44	struct coda_file_info *cfi = coda_ftoc(coda_file);
 45	loff_t ki_pos = iocb->ki_pos;
 46	size_t count = iov_iter_count(to);
 47	ssize_t ret;
 48
 49	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 50				  &cfi->cfi_access_intent,
 51				  count, ki_pos, CODA_ACCESS_TYPE_READ);
 52	if (ret)
 53		goto finish_read;
 54
 55	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);
 56
 57finish_read:
 58	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 59			    &cfi->cfi_access_intent,
 60			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
 61	return ret;
 62}
 63
 64static ssize_t
 65coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
 66{
 67	struct file *coda_file = iocb->ki_filp;
 68	struct inode *coda_inode = file_inode(coda_file);
 69	struct coda_file_info *cfi = coda_ftoc(coda_file);
 70	struct file *host_file = cfi->cfi_container;
 71	loff_t ki_pos = iocb->ki_pos;
 72	size_t count = iov_iter_count(to);
 73	ssize_t ret;
 74
 75	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 76				  &cfi->cfi_access_intent,
 77				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
 78	if (ret)
 79		goto finish_write;
 80
 81	file_start_write(host_file);
 82	inode_lock(coda_inode);
 83	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
 84	coda_inode->i_size = file_inode(host_file)->i_size;
 85	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
 86	coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
 87	inode_unlock(coda_inode);
 88	file_end_write(host_file);
 89
 90finish_write:
 91	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 92			    &cfi->cfi_access_intent,
 93			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
 94	return ret;
 95}
 96
 97static void
 98coda_vm_open(struct vm_area_struct *vma)
 99{
100	struct coda_vm_ops *cvm_ops =
101		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
102
103	refcount_inc(&cvm_ops->refcnt);
104
105	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
106		cvm_ops->host_vm_ops->open(vma);
107}
108
/*
 * A VMA using our redirected vm_operations is going away: chain to the
 * host's close first, then tear down the wrapper once the last user drops.
 */
static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	/* forward the close to the container file's own vm_operations */
	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	/* last mapping gone: restore host ops, unpin coda_file, free wrapper */
	if (refcount_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}
124
/*
 * mmap a Coda file by mmapping the underlying container file instead.
 * Redirects the VMA's open/close vm_operations through a coda_vm_ops
 * wrapper so the coda file stays pinned for the mapping's lifetime,
 * and points the coda inode's i_mapping at the container's mapping.
 */
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	struct inode *host_inode = file_inode(host_file);
	struct coda_inode_info *cii;
	struct coda_vm_ops *cvm_ops;
	loff_t ppos;
	size_t count;
	int ret;

	if (!host_file->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(coda_file != vma->vm_file))
		return -EIO;

	count = vma->vm_end - vma->vm_start;
	ppos = vma->vm_pgoff * PAGE_SIZE;

	/* let Venus know the range that is about to be mapped */
	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ppos, CODA_ACCESS_TYPE_MMAP);
	if (ret)
		return ret;

	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
	if (!cvm_ops)
		return -ENOMEM;

	cii = ITOC(coda_inode);
	spin_lock(&cii->c_lock);
	/* share the container file's page cache with the coda file */
	coda_file->f_mapping = host_file->f_mapping;
	if (coda_inode->i_mapping == &coda_inode->i_data)
		coda_inode->i_mapping = host_inode->i_mapping;

	/* only allow additional mmaps as long as userspace isn't changing
	 * the container file on us! */
	else if (coda_inode->i_mapping != host_inode->i_mapping) {
		spin_unlock(&cii->c_lock);
		kfree(cvm_ops);
		return -EBUSY;
	}

	/* keep track of how often the coda_inode/host_file has been mmapped */
	cii->c_mapcount++;
	cfi->cfi_mapcount++;
	spin_unlock(&cii->c_lock);

	/* hand the VMA a reference to the container file and mmap it */
	vma->vm_file = get_file(host_file);
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put host_file so we
		 * should drop the reference to the coda_file that we got.
		 */
		fput(coda_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
		cvm_ops->host_vm_ops = vma->vm_ops;
		if (vma->vm_ops)
			cvm_ops->vm_ops = *vma->vm_ops;

		cvm_ops->vm_ops.open = coda_vm_open;
		cvm_ops->vm_ops.close = coda_vm_close;
		cvm_ops->coda_file = coda_file;
		refcount_set(&cvm_ops->refcnt, 1);

		vma->vm_ops = &cvm_ops->vm_ops;
	}
	return ret;
}
200
201int coda_open(struct inode *coda_inode, struct file *coda_file)
202{
203	struct file *host_file = NULL;
204	int error;
205	unsigned short flags = coda_file->f_flags & (~O_EXCL);
206	unsigned short coda_flags = coda_flags_to_cflags(flags);
207	struct coda_file_info *cfi;
208
209	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
210	if (!cfi)
211		return -ENOMEM;
212
213	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
214			   &host_file);
215	if (!host_file)
216		error = -EIO;
217
218	if (error) {
219		kfree(cfi);
220		return error;
221	}
222
223	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);
224
225	cfi->cfi_magic = CODA_MAGIC;
226	cfi->cfi_mapcount = 0;
227	cfi->cfi_container = host_file;
228	/* assume access intents are supported unless we hear otherwise */
229	cfi->cfi_access_intent = true;
230
231	BUG_ON(coda_file->private_data != NULL);
232	coda_file->private_data = cfi;
233	return 0;
234}
235
236int coda_release(struct inode *coda_inode, struct file *coda_file)
237{
238	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
239	unsigned short coda_flags = coda_flags_to_cflags(flags);
240	struct coda_file_info *cfi;
241	struct coda_inode_info *cii;
242	struct inode *host_inode;
 
243
244	cfi = coda_ftoc(coda_file);
245
246	venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
247			  coda_flags, coda_file->f_cred->fsuid);
248
249	host_inode = file_inode(cfi->cfi_container);
250	cii = ITOC(coda_inode);
251
252	/* did we mmap this file? */
253	spin_lock(&cii->c_lock);
254	if (coda_inode->i_mapping == &host_inode->i_data) {
255		cii->c_mapcount -= cfi->cfi_mapcount;
256		if (!cii->c_mapcount)
257			coda_inode->i_mapping = &coda_inode->i_data;
258	}
259	spin_unlock(&cii->c_lock);
260
261	fput(cfi->cfi_container);
262	kfree(coda_file->private_data);
263	coda_file->private_data = NULL;
264
265	/* VFS fput ignores the return value from file_operations->release, so
266	 * there is no use returning an error here */
267	return 0;
268}
269
270int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
271{
272	struct file *host_file;
273	struct inode *coda_inode = file_inode(coda_file);
274	struct coda_file_info *cfi;
275	int err;
276
277	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
278	      S_ISLNK(coda_inode->i_mode)))
279		return -EINVAL;
280
281	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
282	if (err)
283		return err;
284	inode_lock(coda_inode);
285
286	cfi = coda_ftoc(coda_file);
287	host_file = cfi->cfi_container;
288
289	err = vfs_fsync(host_file, datasync);
290	if (!err && !datasync)
291		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
292	inode_unlock(coda_inode);
293
294	return err;
295}
296
/*
 * File operations for regular Coda files. Reads, writes, mmap and fsync
 * are forwarded to the container file obtained from Venus at open time.
 */
const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= generic_file_splice_read,
};
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * File operations for Coda.
  4 * Original version: (C) 1996 Peter Braam 
  5 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
  6 *
  7 * Carnegie Mellon encourages users of this code to contribute improvements
  8 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
  9 */
 10
 
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <linux/time.h>
 14#include <linux/file.h>
 15#include <linux/fs.h>
 
 16#include <linux/stat.h>
 17#include <linux/cred.h>
 18#include <linux/errno.h>
 19#include <linux/spinlock.h>
 20#include <linux/string.h>
 21#include <linux/slab.h>
 22#include <linux/uaccess.h>
 23#include <linux/uio.h>
 24
 25#include <linux/coda.h>
 26#include "coda_psdev.h"
 27#include "coda_linux.h"
 28#include "coda_int.h"
 29
/*
 * Per-mapping wrapper around the container file's vm_operations so Coda
 * can intercept open/close of VMAs that map the container file.
 */
struct coda_vm_ops {
	atomic_t refcnt;	/* number of VMAs currently using this wrapper */
	struct file *coda_file;	/* pinned coda file; released on last vm_close */
	const struct vm_operations_struct *host_vm_ops;	/* container file's original ops */
	struct vm_operations_struct vm_ops;	/* copy of host ops with open/close redirected */
};
 36
 37static ssize_t
 38coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 39{
 40	struct file *coda_file = iocb->ki_filp;
 41	struct inode *coda_inode = file_inode(coda_file);
 42	struct coda_file_info *cfi = coda_ftoc(coda_file);
 43	loff_t ki_pos = iocb->ki_pos;
 44	size_t count = iov_iter_count(to);
 45	ssize_t ret;
 46
 47	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 48				  &cfi->cfi_access_intent,
 49				  count, ki_pos, CODA_ACCESS_TYPE_READ);
 50	if (ret)
 51		goto finish_read;
 52
 53	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);
 54
 55finish_read:
 56	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 57			    &cfi->cfi_access_intent,
 58			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
 59	return ret;
 60}
 61
 62static ssize_t
 63coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
 64{
 65	struct file *coda_file = iocb->ki_filp;
 66	struct inode *coda_inode = file_inode(coda_file);
 67	struct coda_file_info *cfi = coda_ftoc(coda_file);
 68	struct file *host_file = cfi->cfi_container;
 69	loff_t ki_pos = iocb->ki_pos;
 70	size_t count = iov_iter_count(to);
 71	ssize_t ret;
 72
 73	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 74				  &cfi->cfi_access_intent,
 75				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
 76	if (ret)
 77		goto finish_write;
 78
 79	file_start_write(host_file);
 80	inode_lock(coda_inode);
 81	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
 82	coda_inode->i_size = file_inode(host_file)->i_size;
 83	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
 84	coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
 85	inode_unlock(coda_inode);
 86	file_end_write(host_file);
 87
 88finish_write:
 89	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
 90			    &cfi->cfi_access_intent,
 91			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
 92	return ret;
 93}
 94
 95static void
 96coda_vm_open(struct vm_area_struct *vma)
 97{
 98	struct coda_vm_ops *cvm_ops =
 99		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
100
101	atomic_inc(&cvm_ops->refcnt);
102
103	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
104		cvm_ops->host_vm_ops->open(vma);
105}
106
/*
 * A VMA using our redirected vm_operations is going away: chain to the
 * host's close first, then tear down the wrapper once the last user drops.
 */
static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	/* forward the close to the container file's own vm_operations */
	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	/* last mapping gone: restore host ops, unpin coda_file, free wrapper */
	if (atomic_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}
122
123static int
124coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
125{
126	struct inode *coda_inode = file_inode(coda_file);
127	struct coda_file_info *cfi = coda_ftoc(coda_file);
128	struct file *host_file = cfi->cfi_container;
129	struct inode *host_inode = file_inode(host_file);
130	struct coda_inode_info *cii;
131	struct coda_vm_ops *cvm_ops;
132	loff_t ppos;
133	size_t count;
134	int ret;
135
136	if (!host_file->f_op->mmap)
137		return -ENODEV;
138
139	if (WARN_ON(coda_file != vma->vm_file))
140		return -EIO;
141
142	count = vma->vm_end - vma->vm_start;
143	ppos = vma->vm_pgoff * PAGE_SIZE;
144
145	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
146				  &cfi->cfi_access_intent,
147				  count, ppos, CODA_ACCESS_TYPE_MMAP);
148	if (ret)
149		return ret;
150
151	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
152	if (!cvm_ops)
153		return -ENOMEM;
154
155	cii = ITOC(coda_inode);
156	spin_lock(&cii->c_lock);
157	coda_file->f_mapping = host_file->f_mapping;
158	if (coda_inode->i_mapping == &coda_inode->i_data)
159		coda_inode->i_mapping = host_inode->i_mapping;
160
161	/* only allow additional mmaps as long as userspace isn't changing
162	 * the container file on us! */
163	else if (coda_inode->i_mapping != host_inode->i_mapping) {
164		spin_unlock(&cii->c_lock);
165		kfree(cvm_ops);
166		return -EBUSY;
167	}
168
169	/* keep track of how often the coda_inode/host_file has been mmapped */
170	cii->c_mapcount++;
171	cfi->cfi_mapcount++;
172	spin_unlock(&cii->c_lock);
173
174	vma->vm_file = get_file(host_file);
175	ret = call_mmap(vma->vm_file, vma);
176
177	if (ret) {
178		/* if call_mmap fails, our caller will put coda_file so we
179		 * should drop the reference to the host_file that we got.
180		 */
181		fput(host_file);
182		kfree(cvm_ops);
183	} else {
184		/* here we add redirects for the open/close vm_operations */
185		cvm_ops->host_vm_ops = vma->vm_ops;
186		if (vma->vm_ops)
187			cvm_ops->vm_ops = *vma->vm_ops;
188
189		cvm_ops->vm_ops.open = coda_vm_open;
190		cvm_ops->vm_ops.close = coda_vm_close;
191		cvm_ops->coda_file = coda_file;
192		atomic_set(&cvm_ops->refcnt, 1);
193
194		vma->vm_ops = &cvm_ops->vm_ops;
195	}
196	return ret;
197}
198
199int coda_open(struct inode *coda_inode, struct file *coda_file)
200{
201	struct file *host_file = NULL;
202	int error;
203	unsigned short flags = coda_file->f_flags & (~O_EXCL);
204	unsigned short coda_flags = coda_flags_to_cflags(flags);
205	struct coda_file_info *cfi;
206
207	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
208	if (!cfi)
209		return -ENOMEM;
210
211	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
212			   &host_file);
213	if (!host_file)
214		error = -EIO;
215
216	if (error) {
217		kfree(cfi);
218		return error;
219	}
220
221	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);
222
223	cfi->cfi_magic = CODA_MAGIC;
224	cfi->cfi_mapcount = 0;
225	cfi->cfi_container = host_file;
226	/* assume access intents are supported unless we hear otherwise */
227	cfi->cfi_access_intent = true;
228
229	BUG_ON(coda_file->private_data != NULL);
230	coda_file->private_data = cfi;
231	return 0;
232}
233
234int coda_release(struct inode *coda_inode, struct file *coda_file)
235{
236	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
237	unsigned short coda_flags = coda_flags_to_cflags(flags);
238	struct coda_file_info *cfi;
239	struct coda_inode_info *cii;
240	struct inode *host_inode;
241	int err;
242
243	cfi = coda_ftoc(coda_file);
244
245	err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
246			  coda_flags, coda_file->f_cred->fsuid);
247
248	host_inode = file_inode(cfi->cfi_container);
249	cii = ITOC(coda_inode);
250
251	/* did we mmap this file? */
252	spin_lock(&cii->c_lock);
253	if (coda_inode->i_mapping == &host_inode->i_data) {
254		cii->c_mapcount -= cfi->cfi_mapcount;
255		if (!cii->c_mapcount)
256			coda_inode->i_mapping = &coda_inode->i_data;
257	}
258	spin_unlock(&cii->c_lock);
259
260	fput(cfi->cfi_container);
261	kfree(coda_file->private_data);
262	coda_file->private_data = NULL;
263
264	/* VFS fput ignores the return value from file_operations->release, so
265	 * there is no use returning an error here */
266	return 0;
267}
268
269int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
270{
271	struct file *host_file;
272	struct inode *coda_inode = file_inode(coda_file);
273	struct coda_file_info *cfi;
274	int err;
275
276	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
277	      S_ISLNK(coda_inode->i_mode)))
278		return -EINVAL;
279
280	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
281	if (err)
282		return err;
283	inode_lock(coda_inode);
284
285	cfi = coda_ftoc(coda_file);
286	host_file = cfi->cfi_container;
287
288	err = vfs_fsync(host_file, datasync);
289	if (!err && !datasync)
290		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
291	inode_unlock(coda_inode);
292
293	return err;
294}
295
/*
 * File operations for regular Coda files. Reads, writes, mmap and fsync
 * are forwarded to the container file obtained from Venus at open time.
 */
const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= generic_file_splice_read,
};