[Scraped page chrome from a Linux kernel source browser (navigation banner and loading placeholder removed). The listing that follows is fs/9p/vfs_file.c at kernel release v6.2; site line numbers are embedded in each line.]
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
 
 
  3 * This file contians vfs file ops for 9P2000.
  4 *
  5 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
  6 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/errno.h>
 11#include <linux/fs.h>
 12#include <linux/sched.h>
 13#include <linux/file.h>
 14#include <linux/stat.h>
 15#include <linux/string.h>
 16#include <linux/inet.h>
 17#include <linux/list.h>
 18#include <linux/pagemap.h>
 19#include <linux/utsname.h>
 20#include <linux/uaccess.h>
 
 21#include <linux/uio.h>
 22#include <linux/slab.h>
 23#include <net/9p/9p.h>
 24#include <net/9p/client.h>
 25
 26#include "v9fs.h"
 27#include "v9fs_vfs.h"
 28#include "fid.h"
 29#include "cache.h"
 30
 31static const struct vm_operations_struct v9fs_file_vm_ops;
 32static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
 33
 34/**
 35 * v9fs_file_open - open a file (or directory)
 36 * @inode: inode to be opened
 37 * @file: file being opened
 38 *
 39 */
 40
 41int v9fs_file_open(struct inode *inode, struct file *file)
 42{
 43	int err;
 44	struct v9fs_inode *v9inode;
 45	struct v9fs_session_info *v9ses;
 46	struct p9_fid *fid, *writeback_fid;
 47	int omode;
 48
 49	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
 50	v9inode = V9FS_I(inode);
 51	v9ses = v9fs_inode2v9ses(inode);
	/* Translate VFS open flags to the wire open mode for the
	 * negotiated protocol variant (9P2000.L vs 9P2000/.u). */
 52	if (v9fs_proto_dotl(v9ses))
 53		omode = v9fs_open_to_dotl_flags(file->f_flags);
 54	else
 55		omode = v9fs_uflags2omode(file->f_flags,
 56					v9fs_proto_dotu(v9ses));
	/* private_data may already hold a fid (set by an earlier path);
	 * only clone and open a fresh one when none is present. */
 57	fid = file->private_data;
 58	if (!fid) {
 59		fid = v9fs_fid_clone(file_dentry(file));
 60		if (IS_ERR(fid))
 61			return PTR_ERR(fid);
 62
 63		err = p9_client_open(fid, omode);
 64		if (err < 0) {
 65			p9_fid_put(fid);
 66			return err;
 67		}
		/* Legacy 9P2000 has no O_APPEND on the wire: emulate it
		 * by seeking to EOF at open time. */
 68		if ((file->f_flags & O_APPEND) &&
 69			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
 70			generic_file_llseek(file, 0, SEEK_END);
 71
 72		file->private_data = fid;
 73	}
 74
 
	/* v_mutex serializes creation of the per-inode writeback fid. */
 75	mutex_lock(&v9inode->v_mutex);
 76	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
 77	    !v9inode->writeback_fid &&
 78	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
 79		/*
 80		 * clone a fid and add it to writeback_fid
 81		 * we do it during open time instead of
 82		 * page dirty time via write_begin/page_mkwrite
 83		 * because we want write after unlink usecase
 84		 * to work.
 85		 */
 86		writeback_fid = v9fs_writeback_fid(file_dentry(file));
 87		if (IS_ERR(writeback_fid)) {
 88			err = PTR_ERR(writeback_fid);
 89			mutex_unlock(&v9inode->v_mutex);
 90			goto out_error;
 91		}
 92		v9inode->writeback_fid = (void *) writeback_fid;
 93	}
 94	mutex_unlock(&v9inode->v_mutex);
 95	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
 96		fscache_use_cookie(v9fs_inode_cookie(v9inode),
 97				   file->f_mode & FMODE_WRITE);
	/* Register the open fid on the inode's list. */
 98	v9fs_open_fid_add(inode, &fid);
 99	return 0;
100out_error:
	/* Failure path: drop the open fid and clear private_data. */
101	p9_fid_put(file->private_data);
102	file->private_data = NULL;
103	return err;
104}
105
106/**
107 * v9fs_file_lock - lock a file (or directory)
108 * @filp: file to be locked
109 * @cmd: lock command
110 * @fl: file lock structure
111 *
112 * Bugs: this looks like a local only lock, we should extend into 9P
113 *       by using open exclusive
114 */
115
116static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
117{
 
118	struct inode *inode = file_inode(filp);
119
120	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
121
 
 
 
 
122	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
123		filemap_write_and_wait(inode->i_mapping);
124		invalidate_mapping_pages(&inode->i_data, 0, -1);
125	}
126
127	return 0;
128}
129
130static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
131{
132	struct p9_flock flock;
133	struct p9_fid *fid;
134	uint8_t status = P9_LOCK_ERROR;
135	int res = 0;
136	unsigned char fl_type;
137	struct v9fs_session_info *v9ses;
138
139	fid = filp->private_data;
140	BUG_ON(fid == NULL);
141
142	BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);
 
143
144	res = locks_lock_file_wait(filp, fl);
145	if (res < 0)
146		goto out;
147
148	/* convert posix lock to p9 tlock args */
149	memset(&flock, 0, sizeof(flock));
150	/* map the lock type */
151	switch (fl->fl_type) {
152	case F_RDLCK:
153		flock.type = P9_LOCK_TYPE_RDLCK;
154		break;
155	case F_WRLCK:
156		flock.type = P9_LOCK_TYPE_WRLCK;
157		break;
158	case F_UNLCK:
159		flock.type = P9_LOCK_TYPE_UNLCK;
160		break;
161	}
162	flock.start = fl->fl_start;
163	if (fl->fl_end == OFFSET_MAX)
164		flock.length = 0;
165	else
166		flock.length = fl->fl_end - fl->fl_start + 1;
167	flock.proc_id = fl->fl_pid;
168	flock.client_id = fid->clnt->name;
169	if (IS_SETLKW(cmd))
170		flock.flags = P9_LOCK_FLAGS_BLOCK;
171
172	v9ses = v9fs_inode2v9ses(file_inode(filp));
173
174	/*
175	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
176	 * for lock request, keep on trying
177	 */
178	for (;;) {
179		res = p9_client_lock_dotl(fid, &flock, &status);
180		if (res < 0)
181			goto out_unlock;
182
183		if (status != P9_LOCK_BLOCKED)
184			break;
185		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
186			break;
187		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
188				!= 0)
189			break;
190		/*
191		 * p9_client_lock_dotl overwrites flock.client_id with the
192		 * server message, free and reuse the client name
193		 */
194		if (flock.client_id != fid->clnt->name) {
195			kfree(flock.client_id);
196			flock.client_id = fid->clnt->name;
197		}
198	}
199
200	/* map 9p status to VFS status */
201	switch (status) {
202	case P9_LOCK_SUCCESS:
203		res = 0;
204		break;
205	case P9_LOCK_BLOCKED:
206		res = -EAGAIN;
207		break;
208	default:
209		WARN_ONCE(1, "unknown lock status code: %d\n", status);
210		fallthrough;
211	case P9_LOCK_ERROR:
212	case P9_LOCK_GRACE:
213		res = -ENOLCK;
214		break;
215	}
216
217out_unlock:
218	/*
219	 * incase server returned error for lock request, revert
220	 * it locally
221	 */
222	if (res < 0 && fl->fl_type != F_UNLCK) {
223		fl_type = fl->fl_type;
224		fl->fl_type = F_UNLCK;
225		/* Even if this fails we want to return the remote error */
226		locks_lock_file_wait(filp, fl);
227		fl->fl_type = fl_type;
228	}
229	if (flock.client_id != fid->clnt->name)
230		kfree(flock.client_id);
231out:
232	return res;
233}
234
235static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
236{
237	struct p9_getlock glock;
238	struct p9_fid *fid;
239	int res = 0;
240
241	fid = filp->private_data;
242	BUG_ON(fid == NULL);
243
	/* Check for a conflicting local lock first. */
244	posix_test_lock(filp, fl);
245	/*
246	 * if we have a conflicting lock locally, no need to validate
247	 * with server
248	 */
249	if (fl->fl_type != F_UNLCK)
250		return res;
251
252	/* convert posix lock to p9 tgetlock args */
253	memset(&glock, 0, sizeof(glock));
254	glock.type  = P9_LOCK_TYPE_UNLCK;
255	glock.start = fl->fl_start;
	/* length 0 encodes "to end of file" on the wire */
256	if (fl->fl_end == OFFSET_MAX)
257		glock.length = 0;
258	else
259		glock.length = fl->fl_end - fl->fl_start + 1;
260	glock.proc_id = fl->fl_pid;
261	glock.client_id = fid->clnt->name;
262
263	res = p9_client_getlock_dotl(fid, &glock);
264	if (res < 0)
265		goto out;
266	/* map 9p lock type to os lock type */
267	switch (glock.type) {
268	case P9_LOCK_TYPE_RDLCK:
269		fl->fl_type = F_RDLCK;
270		break;
271	case P9_LOCK_TYPE_WRLCK:
272		fl->fl_type = F_WRLCK;
273		break;
274	case P9_LOCK_TYPE_UNLCK:
275		fl->fl_type = F_UNLCK;
276		break;
277	}
	/* Server reported a conflict: copy its description back; the
	 * negated pid marks the owner as remote. */
278	if (glock.type != P9_LOCK_TYPE_UNLCK) {
279		fl->fl_start = glock.start;
280		if (glock.length == 0)
281			fl->fl_end = OFFSET_MAX;
282		else
283			fl->fl_end = glock.start + glock.length - 1;
284		fl->fl_pid = -glock.proc_id;
285	}
286out:
	/* free a server-allocated client_id if it replaced ours */
287	if (glock.client_id != fid->clnt->name)
288		kfree(glock.client_id);
289	return res;
290}
291
292/**
293 * v9fs_file_lock_dotl - lock a file (or directory)
294 * @filp: file to be locked
295 * @cmd: lock command
296 * @fl: file lock structure
297 *
298 */
299
300static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
301{
302	struct inode *inode = file_inode(filp);
303	int ret = -ENOLCK;
304
305	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
306		 filp, cmd, fl, filp);
307
 
 
 
 
308	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
309		filemap_write_and_wait(inode->i_mapping);
310		invalidate_mapping_pages(&inode->i_data, 0, -1);
311	}
312
313	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
314		ret = v9fs_file_do_lock(filp, cmd, fl);
315	else if (IS_GETLK(cmd))
316		ret = v9fs_file_getlock(filp, fl);
317	else
318		ret = -EINVAL;
 
319	return ret;
320}
321
322/**
323 * v9fs_file_flock_dotl - lock a file
324 * @filp: file to be locked
325 * @cmd: lock command
326 * @fl: file lock structure
327 *
328 */
329
330static int v9fs_file_flock_dotl(struct file *filp, int cmd,
331	struct file_lock *fl)
332{
333	struct inode *inode = file_inode(filp);
334	int ret = -ENOLCK;
335
336	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
337		 filp, cmd, fl, filp);
338
 
 
 
 
339	if (!(fl->fl_flags & FL_FLOCK))
340		goto out_err;
341
342	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
343		filemap_write_and_wait(inode->i_mapping);
344		invalidate_mapping_pages(&inode->i_data, 0, -1);
345	}
346	/* Convert flock to posix lock */
347	fl->fl_flags |= FL_POSIX;
348	fl->fl_flags ^= FL_FLOCK;
349
350	if (IS_SETLK(cmd) | IS_SETLKW(cmd))
351		ret = v9fs_file_do_lock(filp, cmd, fl);
352	else
353		ret = -EINVAL;
354out_err:
355	return ret;
356}
357
358/**
359 * v9fs_file_read_iter - read from a file
360 * @iocb: The operation parameters
361 * @to: The buffer to read into
 
 
362 *
363 */
 
364static ssize_t
365v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
366{
367	struct p9_fid *fid = iocb->ki_filp->private_data;
368	int ret, err = 0;
369
370	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
371		 iov_iter_count(to), iocb->ki_pos);
372
	/* O_NONBLOCK: issue at most one request rather than looping
	 * until the iterator is satisfied. */
373	if (iocb->ki_filp->f_flags & O_NONBLOCK)
374		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
375	else
376		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	/* ret == 0: nothing read; err is 0 (EOF) or a negative errno */
377	if (!ret)
378		return err;
379
380	iocb->ki_pos += ret;
381	return ret;
382}
383
384/**
385 * v9fs_file_write_iter - write to a file
386 * @iocb: The operation parameters
387 * @from: The data to write
 
 
388 *
389 */
390static ssize_t
391v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
392{
393	struct file *file = iocb->ki_filp;
394	ssize_t retval;
395	loff_t origin;
396	int err = 0;
397
398	retval = generic_write_checks(iocb, from);
399	if (retval <= 0)
400		return retval;
401
402	origin = iocb->ki_pos;
	/* Write through to the server, bypassing the page cache. */
403	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
404	if (retval > 0) {
405		struct inode *inode = file_inode(file);
406		loff_t i_size;
407		unsigned long pg_start, pg_end;
408
		/* Invalidate cached pages covering the written range so
		 * future reads see the new data. */
409		pg_start = origin >> PAGE_SHIFT;
410		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
411		if (inode->i_mapping && inode->i_mapping->nrpages)
412			invalidate_inode_pages2_range(inode->i_mapping,
413						      pg_start, pg_end);
414		iocb->ki_pos += retval;
415		i_size = i_size_read(inode);
416		if (iocb->ki_pos > i_size) {
417			inode_add_bytes(inode, iocb->ki_pos - i_size);
418			/*
419			 * Need to serialize against i_size_write() in
420			 * v9fs_stat2inode()
421			 */
422			v9fs_i_size_write(inode, iocb->ki_pos);
423		}
424		return retval;
425	}
426	return err;
427}
428
429static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
430			   int datasync)
431{
432	struct p9_fid *fid;
433	struct inode *inode = filp->f_mapping->host;
434	struct p9_wstat wstat;
435	int retval;
436
	/* Push dirty pages in the range before asking the server. */
437	retval = file_write_and_wait_range(filp, start, end);
438	if (retval)
439		return retval;
440
441	inode_lock(inode);
442	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
443
444	fid = filp->private_data;
	/* Legacy protocol has no fsync op; a blank wstat asks the
	 * server to flush the file's state. */
445	v9fs_blank_wstat(&wstat);
446
447	retval = p9_client_wstat(fid, &wstat);
448	inode_unlock(inode);
449
450	return retval;
451}
452
453int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
454			 int datasync)
455{
456	struct p9_fid *fid;
457	struct inode *inode = filp->f_mapping->host;
458	int retval;
459
	/* Push dirty pages in the range, then fsync on the server. */
460	retval = file_write_and_wait_range(filp, start, end);
461	if (retval)
462		return retval;
463
464	inode_lock(inode);
465	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
466
467	fid = filp->private_data;
468
469	retval = p9_client_fsync(fid, datasync);
470	inode_unlock(inode);
471
472	return retval;
473}
474
475static int
476v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
477{
478	int retval;
479
480
481	retval = generic_file_mmap(filp, vma);
482	if (!retval)
483		vma->vm_ops = &v9fs_file_vm_ops;
484
485	return retval;
486}
487
488static int
489v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
490{
491	int retval;
492	struct inode *inode;
493	struct v9fs_inode *v9inode;
494	struct p9_fid *fid;
495
496	inode = file_inode(filp);
497	v9inode = V9FS_I(inode);
	/* v_mutex serializes creation of the per-inode writeback fid. */
498	mutex_lock(&v9inode->v_mutex);
	/* A writable shared mapping needs a fid that writeback can use
	 * independently of the opening file. */
499	if (!v9inode->writeback_fid &&
500	    (vma->vm_flags & VM_SHARED) &&
501	    (vma->vm_flags & VM_WRITE)) {
502		/*
503		 * clone a fid and add it to writeback_fid
504		 * we do it during mmap instead of
505		 * page dirty time via write_begin/page_mkwrite
506		 * because we want write after unlink usecase
507		 * to work.
508		 */
509		fid = v9fs_writeback_fid(file_dentry(filp));
510		if (IS_ERR(fid)) {
511			retval = PTR_ERR(fid);
512			mutex_unlock(&v9inode->v_mutex);
513			return retval;
514		}
515		v9inode->writeback_fid = (void *) fid;
516	}
517	mutex_unlock(&v9inode->v_mutex);
518
519	retval = generic_file_mmap(filp, vma);
520	if (!retval)
521		vma->vm_ops = &v9fs_mmap_file_vm_ops;
522
523	return retval;
524}
525
526static vm_fault_t
527v9fs_vm_page_mkwrite(struct vm_fault *vmf)
528{
529	struct v9fs_inode *v9inode;
530	struct folio *folio = page_folio(vmf->page);
531	struct file *filp = vmf->vma->vm_file;
532	struct inode *inode = file_inode(filp);
533
534
535	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
536		 folio, (unsigned long)filp->private_data);
537
538	v9inode = V9FS_I(inode);
539
540	/* Wait for the page to be written to the cache before we allow it to
541	 * be modified.  We then assume the entire page will need writing back.
542	 */
543#ifdef CONFIG_9P_FSCACHE
544	if (folio_test_fscache(folio) &&
545	    folio_wait_fscache_killable(folio) < 0)
546		return VM_FAULT_NOPAGE;
547#endif
548
549	/* Update file times before taking page lock */
550	file_update_time(filp);
551
 
 
 
	/* The mmap path must have set up a writeback fid already. */
552	BUG_ON(!v9inode->writeback_fid);
553	if (folio_lock_killable(folio) < 0)
554		return VM_FAULT_RETRY;
	/* The folio may have been truncated/invalidated while unlocked. */
555	if (folio_mapping(folio) != inode->i_mapping)
556		goto out_unlock;
	/* Let any in-flight writeback of this folio finish first. */
557	folio_wait_stable(folio);
558
559	return VM_FAULT_LOCKED;
560out_unlock:
561	folio_unlock(folio);
562	return VM_FAULT_NOPAGE;
563}
564
565/**
566 * v9fs_mmap_file_read_iter - read from a file
567 * @iocb: The operation parameters
568 * @to: The buffer to read into
 
 
569 *
570 */
571static ssize_t
572v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
573{
	/* Plain passthrough; dirty mmap pages are not flushed first. */
574	/* TODO: Check if there are dirty pages */
575	return v9fs_file_read_iter(iocb, to);
576}
577
578/**
579 * v9fs_mmap_file_write_iter - write to a file
580 * @iocb: The operation parameters
581 * @from: The data to write
 
 
582 *
583 */
584static ssize_t
585v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
586{
	/* Passthrough to the direct (non-cached) write path. */
587	/*
588	 * TODO: invalidate mmaps on filp's inode between
589	 * offset and offset+count
590	 */
591	return v9fs_file_write_iter(iocb, from);
592}
593
594static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
595{
596	struct inode *inode;
597
	/* Synchronously write back exactly the byte range this VMA maps. */
598	struct writeback_control wbc = {
599		.nr_to_write = LONG_MAX,
600		.sync_mode = WB_SYNC_ALL,
601		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
602		 /* absolute end, byte at end included */
603		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
604			(vma->vm_end - vma->vm_start - 1),
605	};
606
	/* Private mappings have nothing to write back. */
607	if (!(vma->vm_flags & VM_SHARED))
608		return;
609
610	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
611
612	inode = file_inode(vma->vm_file);
613	filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
 
 
 
 
 
614}
615
616
/* vm_ops for cached mappings: faults served from the page cache,
 * with 9p-specific page_mkwrite handling. */
617static const struct vm_operations_struct v9fs_file_vm_ops = {
618	.fault = filemap_fault,
619	.map_pages = filemap_map_pages,
620	.page_mkwrite = v9fs_vm_page_mkwrite,
621};
622
/* As above, plus a close hook that flushes shared mappings. */
623static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
624	.close = v9fs_mmap_vm_close,
625	.fault = filemap_fault,
626	.map_pages = filemap_map_pages,
627	.page_mkwrite = v9fs_vm_page_mkwrite,
628};
629
630
/*
 * File operation tables.  "cached" variants route reads/writes through
 * the page cache; the plain variants talk to the server directly; the
 * "mmap" variants route through the passthrough iter helpers and allow
 * shared writable mappings.  "_dotl" variants add 9P2000.L locking
 * (.lock plus .flock).
 */
631const struct file_operations v9fs_cached_file_operations = {
632	.llseek = generic_file_llseek,
633	.read_iter = generic_file_read_iter,
634	.write_iter = generic_file_write_iter,
635	.open = v9fs_file_open,
636	.release = v9fs_dir_release,
637	.lock = v9fs_file_lock,
638	.mmap = v9fs_file_mmap,
639	.splice_read = generic_file_splice_read,
640	.splice_write = iter_file_splice_write,
641	.fsync = v9fs_file_fsync,
642};
643
/* Cached I/O with 9P2000.L locking. */
644const struct file_operations v9fs_cached_file_operations_dotl = {
645	.llseek = generic_file_llseek,
646	.read_iter = generic_file_read_iter,
647	.write_iter = generic_file_write_iter,
648	.open = v9fs_file_open,
649	.release = v9fs_dir_release,
650	.lock = v9fs_file_lock_dotl,
651	.flock = v9fs_file_flock_dotl,
652	.mmap = v9fs_file_mmap,
653	.splice_read = generic_file_splice_read,
654	.splice_write = iter_file_splice_write,
655	.fsync = v9fs_file_fsync,
656};
657
/* Direct (uncached) I/O; mmap is read-only. */
658const struct file_operations v9fs_file_operations = {
659	.llseek = generic_file_llseek,
660	.read_iter = v9fs_file_read_iter,
661	.write_iter = v9fs_file_write_iter,
662	.open = v9fs_file_open,
663	.release = v9fs_dir_release,
664	.lock = v9fs_file_lock,
665	.mmap = generic_file_readonly_mmap,
666	.splice_read = generic_file_splice_read,
667	.splice_write = iter_file_splice_write,
668	.fsync = v9fs_file_fsync,
669};
670
/* Direct I/O with 9P2000.L locking; mmap is read-only. */
671const struct file_operations v9fs_file_operations_dotl = {
672	.llseek = generic_file_llseek,
673	.read_iter = v9fs_file_read_iter,
674	.write_iter = v9fs_file_write_iter,
675	.open = v9fs_file_open,
676	.release = v9fs_dir_release,
677	.lock = v9fs_file_lock_dotl,
678	.flock = v9fs_file_flock_dotl,
679	.mmap = generic_file_readonly_mmap,
680	.splice_read = generic_file_splice_read,
681	.splice_write = iter_file_splice_write,
682	.fsync = v9fs_file_fsync_dotl,
683};
684
/* Direct I/O with writable shared mmap support. */
685const struct file_operations v9fs_mmap_file_operations = {
686	.llseek = generic_file_llseek,
687	.read_iter = v9fs_mmap_file_read_iter,
688	.write_iter = v9fs_mmap_file_write_iter,
689	.open = v9fs_file_open,
690	.release = v9fs_dir_release,
691	.lock = v9fs_file_lock,
692	.mmap = v9fs_mmap_file_mmap,
693	.splice_read = generic_file_splice_read,
694	.splice_write = iter_file_splice_write,
695	.fsync = v9fs_file_fsync,
696};
697
/* Writable shared mmap support with 9P2000.L locking. */
698const struct file_operations v9fs_mmap_file_operations_dotl = {
699	.llseek = generic_file_llseek,
700	.read_iter = v9fs_mmap_file_read_iter,
701	.write_iter = v9fs_mmap_file_write_iter,
702	.open = v9fs_file_open,
703	.release = v9fs_dir_release,
704	.lock = v9fs_file_lock_dotl,
705	.flock = v9fs_file_flock_dotl,
706	.mmap = v9fs_mmap_file_mmap,
707	.splice_read = generic_file_splice_read,
708	.splice_write = iter_file_splice_write,
709	.fsync = v9fs_file_fsync_dotl,
710};
[v5.4 — an older revision of the same file (fs/9p/vfs_file.c) follows below, also with site line numbers embedded.]
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/fs/9p/vfs_file.c
  4 *
  5 * This file contians vfs file ops for 9P2000.
  6 *
  7 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
  8 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  9 */
 10
 11#include <linux/module.h>
 12#include <linux/errno.h>
 13#include <linux/fs.h>
 14#include <linux/sched.h>
 15#include <linux/file.h>
 16#include <linux/stat.h>
 17#include <linux/string.h>
 18#include <linux/inet.h>
 19#include <linux/list.h>
 20#include <linux/pagemap.h>
 21#include <linux/utsname.h>
 22#include <linux/uaccess.h>
 23#include <linux/idr.h>
 24#include <linux/uio.h>
 25#include <linux/slab.h>
 26#include <net/9p/9p.h>
 27#include <net/9p/client.h>
 28
 29#include "v9fs.h"
 30#include "v9fs_vfs.h"
 31#include "fid.h"
 32#include "cache.h"
 33
 34static const struct vm_operations_struct v9fs_file_vm_ops;
 35static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
 36
 37/**
 38 * v9fs_file_open - open a file (or directory)
 39 * @inode: inode to be opened
 40 * @file: file being opened
 41 *
 42 */
 43
 44int v9fs_file_open(struct inode *inode, struct file *file)
 45{
 46	int err;
 47	struct v9fs_inode *v9inode;
 48	struct v9fs_session_info *v9ses;
 49	struct p9_fid *fid;
 50	int omode;
 51
 52	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
 53	v9inode = V9FS_I(inode);
 54	v9ses = v9fs_inode2v9ses(inode);
	/* Map VFS open flags to the negotiated protocol's open mode. */
 55	if (v9fs_proto_dotl(v9ses))
 56		omode = v9fs_open_to_dotl_flags(file->f_flags);
 57	else
 58		omode = v9fs_uflags2omode(file->f_flags,
 59					v9fs_proto_dotu(v9ses));
 60	fid = file->private_data;
 61	if (!fid) {
 62		fid = v9fs_fid_clone(file_dentry(file));
 63		if (IS_ERR(fid))
 64			return PTR_ERR(fid);
 65
 66		err = p9_client_open(fid, omode);
 67		if (err < 0) {
 68			p9_client_clunk(fid);
 69			return err;
 70		}
		/* Legacy 9P2000 lacks O_APPEND on the wire; emulate by
		 * seeking to EOF at open time. */
 71		if ((file->f_flags & O_APPEND) &&
 72			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
 73			generic_file_llseek(file, 0, SEEK_END);
 
 
 74	}
 75
 76	file->private_data = fid;
	/* v_mutex serializes writeback_fid creation across openers. */
 77	mutex_lock(&v9inode->v_mutex);
 78	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
 79	    !v9inode->writeback_fid &&
 80	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
 81		/*
 82		 * clone a fid and add it to writeback_fid
 83		 * we do it during open time instead of
 84		 * page dirty time via write_begin/page_mkwrite
 85		 * because we want write after unlink usecase
 86		 * to work.
 87		 */
 88		fid = v9fs_writeback_fid(file_dentry(file));
 89		if (IS_ERR(fid)) {
 90			err = PTR_ERR(fid);
 91			mutex_unlock(&v9inode->v_mutex);
 92			goto out_error;
 93		}
 94		v9inode->writeback_fid = (void *) fid;
 95	}
 96	mutex_unlock(&v9inode->v_mutex);
 97	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
 98		v9fs_cache_inode_set_cookie(inode, file);
 
 
 99	return 0;
100out_error:
	/* Failure path: clunk the open fid and clear private_data. */
101	p9_client_clunk(file->private_data);
102	file->private_data = NULL;
103	return err;
104}
105
106/**
107 * v9fs_file_lock - lock a file (or directory)
108 * @filp: file to be locked
109 * @cmd: lock command
110 * @fl: file lock structure
111 *
112 * Bugs: this looks like a local only lock, we should extend into 9P
113 *       by using open exclusive
114 */
115
116static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
117{
118	int res = 0;
119	struct inode *inode = file_inode(filp);
120
121	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
122
123	/* No mandatory locks */
124	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
125		return -ENOLCK;
126
	/* Flush and drop cached pages before a set-lock request so
	 * reads under the lock revalidate against the server. */
127	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
128		filemap_write_and_wait(inode->i_mapping);
129		invalidate_mapping_pages(&inode->i_data, 0, -1);
130	}
131
132	return res;
133}
134
135static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
136{
137	struct p9_flock flock;
138	struct p9_fid *fid;
139	uint8_t status = P9_LOCK_ERROR;
140	int res = 0;
141	unsigned char fl_type;
142	struct v9fs_session_info *v9ses;
143
144	fid = filp->private_data;
145	BUG_ON(fid == NULL);
146
	/* NOTE(review): could be written as BUG_ON(...) directly. */
147	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
148		BUG();
149
	/* Take (or drop) the lock locally before telling the server. */
150	res = locks_lock_file_wait(filp, fl);
151	if (res < 0)
152		goto out;
153
154	/* convert posix lock to p9 tlock args */
155	memset(&flock, 0, sizeof(flock));
156	/* map the lock type */
157	switch (fl->fl_type) {
158	case F_RDLCK:
159		flock.type = P9_LOCK_TYPE_RDLCK;
160		break;
161	case F_WRLCK:
162		flock.type = P9_LOCK_TYPE_WRLCK;
163		break;
164	case F_UNLCK:
165		flock.type = P9_LOCK_TYPE_UNLCK;
166		break;
167	}
168	flock.start = fl->fl_start;
	/* length 0 encodes "to end of file" on the wire */
169	if (fl->fl_end == OFFSET_MAX)
170		flock.length = 0;
171	else
172		flock.length = fl->fl_end - fl->fl_start + 1;
173	flock.proc_id = fl->fl_pid;
174	flock.client_id = fid->clnt->name;
175	if (IS_SETLKW(cmd))
176		flock.flags = P9_LOCK_FLAGS_BLOCK;
177
178	v9ses = v9fs_inode2v9ses(file_inode(filp));
179
180	/*
181	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
182	 * for lock request, keep on trying
183	 */
184	for (;;) {
185		res = p9_client_lock_dotl(fid, &flock, &status);
186		if (res < 0)
187			goto out_unlock;
188
189		if (status != P9_LOCK_BLOCKED)
190			break;
		/* NOTE(review): status == P9_LOCK_BLOCKED is always true
		 * here; this test reduces to !IS_SETLKW(cmd). */
191		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
192			break;
193		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
194				!= 0)
195			break;
196		/*
197		 * p9_client_lock_dotl overwrites flock.client_id with the
198		 * server message, free and reuse the client name
199		 */
200		if (flock.client_id != fid->clnt->name) {
201			kfree(flock.client_id);
202			flock.client_id = fid->clnt->name;
203		}
204	}
205
206	/* map 9p status to VFS status */
207	switch (status) {
208	case P9_LOCK_SUCCESS:
209		res = 0;
210		break;
211	case P9_LOCK_BLOCKED:
212		res = -EAGAIN;
213		break;
214	default:
215		WARN_ONCE(1, "unknown lock status code: %d\n", status);
216		/* fall through */
217	case P9_LOCK_ERROR:
218	case P9_LOCK_GRACE:
219		res = -ENOLCK;
220		break;
221	}
222
223out_unlock:
224	/*
225	 * incase server returned error for lock request, revert
226	 * it locally
227	 */
228	if (res < 0 && fl->fl_type != F_UNLCK) {
229		fl_type = fl->fl_type;
230		fl->fl_type = F_UNLCK;
231		/* Even if this fails we want to return the remote error */
232		locks_lock_file_wait(filp, fl);
233		fl->fl_type = fl_type;
234	}
235	if (flock.client_id != fid->clnt->name)
236		kfree(flock.client_id);
237out:
238	return res;
239}
240
241static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
242{
243	struct p9_getlock glock;
244	struct p9_fid *fid;
245	int res = 0;
246
247	fid = filp->private_data;
248	BUG_ON(fid == NULL);
249
	/* Check for a conflicting local lock first. */
250	posix_test_lock(filp, fl);
251	/*
252	 * if we have a conflicting lock locally, no need to validate
253	 * with server
254	 */
255	if (fl->fl_type != F_UNLCK)
256		return res;
257
258	/* convert posix lock to p9 tgetlock args */
259	memset(&glock, 0, sizeof(glock));
260	glock.type  = P9_LOCK_TYPE_UNLCK;
261	glock.start = fl->fl_start;
	/* length 0 encodes "to end of file" on the wire */
262	if (fl->fl_end == OFFSET_MAX)
263		glock.length = 0;
264	else
265		glock.length = fl->fl_end - fl->fl_start + 1;
266	glock.proc_id = fl->fl_pid;
267	glock.client_id = fid->clnt->name;
268
269	res = p9_client_getlock_dotl(fid, &glock);
270	if (res < 0)
271		goto out;
272	/* map 9p lock type to os lock type */
273	switch (glock.type) {
274	case P9_LOCK_TYPE_RDLCK:
275		fl->fl_type = F_RDLCK;
276		break;
277	case P9_LOCK_TYPE_WRLCK:
278		fl->fl_type = F_WRLCK;
279		break;
280	case P9_LOCK_TYPE_UNLCK:
281		fl->fl_type = F_UNLCK;
282		break;
283	}
	/* Server reported a conflict: copy its description back; the
	 * negated pid marks the owner as remote. */
284	if (glock.type != P9_LOCK_TYPE_UNLCK) {
285		fl->fl_start = glock.start;
286		if (glock.length == 0)
287			fl->fl_end = OFFSET_MAX;
288		else
289			fl->fl_end = glock.start + glock.length - 1;
290		fl->fl_pid = -glock.proc_id;
291	}
292out:
	/* free a server-allocated client_id if it replaced ours */
293	if (glock.client_id != fid->clnt->name)
294		kfree(glock.client_id);
295	return res;
296}
297
298/**
299 * v9fs_file_lock_dotl - lock a file (or directory)
300 * @filp: file to be locked
301 * @cmd: lock command
302 * @fl: file lock structure
303 *
304 */
305
306static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
307{
308	struct inode *inode = file_inode(filp);
309	int ret = -ENOLCK;
310
311	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
312		 filp, cmd, fl, filp);
313
314	/* No mandatory locks */
315	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
316		goto out_err;
317
	/* Flush and invalidate cached pages before installing a lock. */
318	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
319		filemap_write_and_wait(inode->i_mapping);
320		invalidate_mapping_pages(&inode->i_data, 0, -1);
321	}
322
	/* Dispatch: SETLK/SETLKW take the lock, GETLK queries it. */
323	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
324		ret = v9fs_file_do_lock(filp, cmd, fl);
325	else if (IS_GETLK(cmd))
326		ret = v9fs_file_getlock(filp, fl);
327	else
328		ret = -EINVAL;
329out_err:
330	return ret;
331}
332
333/**
334 * v9fs_file_flock_dotl - lock a file
335 * @filp: file to be locked
336 * @cmd: lock command
337 * @fl: file lock structure
338 *
339 */
340
341static int v9fs_file_flock_dotl(struct file *filp, int cmd,
342	struct file_lock *fl)
343{
344	struct inode *inode = file_inode(filp);
345	int ret = -ENOLCK;
346
347	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
348		 filp, cmd, fl, filp);
349
350	/* No mandatory locks */
351	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
352		goto out_err;
353
354	if (!(fl->fl_flags & FL_FLOCK))
355		goto out_err;
356
	/* Flush and invalidate cached pages before taking the lock. */
357	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
358		filemap_write_and_wait(inode->i_mapping);
359		invalidate_mapping_pages(&inode->i_data, 0, -1);
360	}
361	/* Convert flock to posix lock */
362	fl->fl_flags |= FL_POSIX;
363	fl->fl_flags ^= FL_FLOCK;
364
	/* NOTE(review): bitwise | happens to work since the macros
	 * yield 0/1, but logical || would state the intent. */
365	if (IS_SETLK(cmd) | IS_SETLKW(cmd))
366		ret = v9fs_file_do_lock(filp, cmd, fl);
367	else
368		ret = -EINVAL;
369out_err:
370	return ret;
371}
372
373/**
374 * v9fs_file_read - read from a file
375 * @filp: file pointer to read
376 * @udata: user data buffer to read data into
377 * @count: size of buffer
378 * @offset: offset at which to read data
379 *
380 */
381
382static ssize_t
383v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
384{
385	struct p9_fid *fid = iocb->ki_filp->private_data;
386	int ret, err = 0;
387
388	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
389		 iov_iter_count(to), iocb->ki_pos);
390
	/* Read directly from the server, bypassing the page cache. */
391	ret = p9_client_read(fid, iocb->ki_pos, to, &err);
 
 
 
	/* ret == 0: nothing read; err is 0 (EOF) or a negative errno */
392	if (!ret)
393		return err;
394
395	iocb->ki_pos += ret;
396	return ret;
397}
398
399/**
400 * v9fs_file_write - write to a file
401 * @filp: file pointer to write
402 * @data: data buffer to write data from
403 * @count: size of buffer
404 * @offset: offset at which to write data
405 *
406 */
407static ssize_t
408v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
409{
410	struct file *file = iocb->ki_filp;
411	ssize_t retval;
412	loff_t origin;
413	int err = 0;
414
415	retval = generic_write_checks(iocb, from);
416	if (retval <= 0)
417		return retval;
418
419	origin = iocb->ki_pos;
	/* Write through to the server, bypassing the page cache. */
420	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
421	if (retval > 0) {
422		struct inode *inode = file_inode(file);
423		loff_t i_size;
424		unsigned long pg_start, pg_end;
 
		/* Invalidate cached pages covering the written range. */
425		pg_start = origin >> PAGE_SHIFT;
426		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
427		if (inode->i_mapping && inode->i_mapping->nrpages)
428			invalidate_inode_pages2_range(inode->i_mapping,
429						      pg_start, pg_end);
430		iocb->ki_pos += retval;
431		i_size = i_size_read(inode);
432		if (iocb->ki_pos > i_size) {
433			inode_add_bytes(inode, iocb->ki_pos - i_size);
434			/*
435			 * Need to serialize against i_size_write() in
436			 * v9fs_stat2inode()
437			 */
438			v9fs_i_size_write(inode, iocb->ki_pos);
439		}
440		return retval;
441	}
442	return err;
443}
444
445static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
446			   int datasync)
447{
448	struct p9_fid *fid;
449	struct inode *inode = filp->f_mapping->host;
450	struct p9_wstat wstat;
451	int retval;
452
	/* Push dirty pages in the range before asking the server. */
453	retval = file_write_and_wait_range(filp, start, end);
454	if (retval)
455		return retval;
456
457	inode_lock(inode);
458	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
459
460	fid = filp->private_data;
	/* Legacy protocol: a blank wstat asks the server to flush. */
461	v9fs_blank_wstat(&wstat);
462
463	retval = p9_client_wstat(fid, &wstat);
464	inode_unlock(inode);
465
466	return retval;
467}
468
469int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
470			 int datasync)
471{
472	struct p9_fid *fid;
473	struct inode *inode = filp->f_mapping->host;
474	int retval;
475
	/* Push dirty pages in the range, then fsync on the server. */
476	retval = file_write_and_wait_range(filp, start, end);
477	if (retval)
478		return retval;
479
480	inode_lock(inode);
481	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
482
483	fid = filp->private_data;
484
485	retval = p9_client_fsync(fid, datasync);
486	inode_unlock(inode);
487
488	return retval;
489}
490
491static int
492v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
493{
494	int retval;
495
496
497	retval = generic_file_mmap(filp, vma);
498	if (!retval)
499		vma->vm_ops = &v9fs_file_vm_ops;
500
501	return retval;
502}
503
504static int
505v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
506{
507	int retval;
508	struct inode *inode;
509	struct v9fs_inode *v9inode;
510	struct p9_fid *fid;
511
512	inode = file_inode(filp);
513	v9inode = V9FS_I(inode);
514	mutex_lock(&v9inode->v_mutex);
515	if (!v9inode->writeback_fid &&
516	    (vma->vm_flags & VM_SHARED) &&
517	    (vma->vm_flags & VM_WRITE)) {
518		/*
519		 * clone a fid and add it to writeback_fid
520		 * we do it during mmap instead of
521		 * page dirty time via write_begin/page_mkwrite
522		 * because we want write after unlink usecase
523		 * to work.
524		 */
525		fid = v9fs_writeback_fid(file_dentry(filp));
526		if (IS_ERR(fid)) {
527			retval = PTR_ERR(fid);
528			mutex_unlock(&v9inode->v_mutex);
529			return retval;
530		}
531		v9inode->writeback_fid = (void *) fid;
532	}
533	mutex_unlock(&v9inode->v_mutex);
534
535	retval = generic_file_mmap(filp, vma);
536	if (!retval)
537		vma->vm_ops = &v9fs_mmap_file_vm_ops;
538
539	return retval;
540}
541
/*
 * v9fs_vm_page_mkwrite - a read-only mapped page is about to be written
 * @vmf: fault descriptor; vmf->page is the page being made writable
 *
 * Returns VM_FAULT_LOCKED with the page locked on success, or
 * VM_FAULT_NOPAGE when the page was truncated/invalidated while we
 * waited and the fault must be retried.
 */
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	/* v9fs_mmap_file_mmap() must have set up the writeback fid */
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	/* page may have been truncated away while we were unlocked */
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}
571
/**
 * v9fs_mmap_file_read_iter - read from a file (cache=mmap mode)
 * @iocb: kiocb describing the source file and position
 * @to: iov_iter that receives the data
 *
 * Currently just forwards to v9fs_file_read_iter().
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}
586
/**
 * v9fs_mmap_file_write_iter - write to a file (cache=mmap mode)
 * @iocb: kiocb describing the target file and position
 * @from: iov_iter supplying the data to write
 *
 * Currently just forwards to v9fs_file_write_iter().
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}
604
/*
 * v9fs_mmap_vm_close - VMA close hook for cache=mmap mappings
 * @vma: area being torn down
 *
 * For shared mappings, synchronously write back exactly the byte range
 * covered by the VMA before it goes away, so pages dirtied through the
 * mapping reach the server.
 */
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	/* byte range of the file backed by this VMA */
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	/* private mappings never dirty the shared pagecache */
	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);

	/* nothing to push if the mapping cannot hold dirty pages */
	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	sync_inode(inode, &wbc);
}
631
632
/* vm_ops for cached-mode mappings: generic faulting, 9p page_mkwrite */
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
638
/* vm_ops for cache=mmap mappings: additionally flush the range on close */
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
645
646
/* cached (cache=loose/fscache) mode, legacy 9P: pagecache-backed I/O */
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync,
};
657
/* cached mode, 9P2000.L: adds flock and the native fsync request */
const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};
669
/* uncached mode, legacy 9P: direct client I/O, read-only mmap */
const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync,
};
680
/* uncached mode, 9P2000.L: direct client I/O, read-only mmap */
const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync_dotl,
};
692
/* cache=mmap mode, legacy 9P: direct I/O but writable shared mmap */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync,
};
703
/* cache=mmap mode, 9P2000.L: direct I/O but writable shared mmap */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};