1// SPDX-License-Identifier: GPL-2.0-only
2/*
 * This file contains vfs file ops for 9P2000.
4 *
5 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
6 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
7 */
8
9#include <linux/module.h>
10#include <linux/errno.h>
11#include <linux/fs.h>
12#include <linux/sched.h>
13#include <linux/file.h>
14#include <linux/stat.h>
15#include <linux/string.h>
16#include <linux/inet.h>
17#include <linux/list.h>
18#include <linux/pagemap.h>
19#include <linux/utsname.h>
20#include <linux/uaccess.h>
21#include <linux/uio.h>
22#include <linux/slab.h>
23#include <net/9p/9p.h>
24#include <net/9p/client.h>
25
26#include "v9fs.h"
27#include "v9fs_vfs.h"
28#include "fid.h"
29#include "cache.h"
30
31static const struct vm_operations_struct v9fs_file_vm_ops;
32static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
33
/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 * Obtains an open fid for @file (reusing one already stashed in
 * ->private_data, e.g. by an earlier create path), optionally sets up a
 * writeback fid for cached writable opens, and registers the fid on the
 * inode's open-fid list.  Returns 0 on success or a negative errno.
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *writeback_fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	/* Translate the VFS open flags into the negotiated dialect's mode. */
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		/* No pre-installed fid: clone one from the dentry and open it. */
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_fid_put(fid);
			return err;
		}
		/*
		 * Legacy 9P2000 (neither .u nor .L) has no append mode on
		 * the wire; emulate O_APPEND by seeking to EOF at open time.
		 */
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);

		file->private_data = fid;
	}

	/* v_mutex serializes creation of the per-inode writeback fid. */
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during open time instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		writeback_fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(writeback_fid)) {
			err = PTR_ERR(writeback_fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) writeback_fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	/* Pin the fscache cookie for the lifetime of this open. */
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		fscache_use_cookie(v9fs_inode_cookie(v9inode),
				   file->f_mode & FMODE_WRITE);
	/* Hand the fid over to the inode's open-fid list. */
	v9fs_open_fid_add(inode, &fid);
	return 0;
out_error:
	p9_fid_put(file->private_data);
	file->private_data = NULL;
	return err;
}
105
106/**
107 * v9fs_file_lock - lock a file (or directory)
108 * @filp: file to be locked
109 * @cmd: lock command
110 * @fl: file lock structure
111 *
112 * Bugs: this looks like a local only lock, we should extend into 9P
113 * by using open exclusive
114 */
115
116static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
117{
118 struct inode *inode = file_inode(filp);
119
120 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
121
122 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
123 filemap_write_and_wait(inode->i_mapping);
124 invalidate_mapping_pages(&inode->i_data, 0, -1);
125 }
126
127 return 0;
128}
129
/*
 * v9fs_file_do_lock - take or release a POSIX lock both locally and on the
 * server via the 9P2000.L TLOCK request.  The local lock is taken first;
 * if the server then refuses, the local lock is reverted.
 */
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);

	/* Take (or drop) the lock locally before asking the server. */
	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	/* On the wire, length 0 means "to the end of the file". */
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
	 * for lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		/* NOTE(review): status is known to be P9_LOCK_BLOCKED here,
		 * so the first half of this condition is redundant. */
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		/* Interrupted by a signal: give up waiting. */
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * incase server returned error for lock request, revert
	 * it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	/* Free a server-allocated client_id left over from the last reply. */
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}
234
/*
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK).  Checks local
 * locks first, then queries the server with TGETLOCK, and translates a
 * conflicting server lock back into @fl.
 */
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	/* On the wire, length 0 means "to the end of the file". */
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	/* A conflicting lock exists: report its extent and owner. */
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		/* Negative pid marks the owner as remote. */
		fl->fl_pid = -glock.proc_id;
	}
out:
	/* glock.client_id may have been replaced by a server-allocated name. */
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}
291
292/**
293 * v9fs_file_lock_dotl - lock a file (or directory)
294 * @filp: file to be locked
295 * @cmd: lock command
296 * @fl: file lock structure
297 *
298 */
299
300static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
301{
302 struct inode *inode = file_inode(filp);
303 int ret = -ENOLCK;
304
305 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
306 filp, cmd, fl, filp);
307
308 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
309 filemap_write_and_wait(inode->i_mapping);
310 invalidate_mapping_pages(&inode->i_data, 0, -1);
311 }
312
313 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
314 ret = v9fs_file_do_lock(filp, cmd, fl);
315 else if (IS_GETLK(cmd))
316 ret = v9fs_file_getlock(filp, fl);
317 else
318 ret = -EINVAL;
319 return ret;
320}
321
322/**
323 * v9fs_file_flock_dotl - lock a file
324 * @filp: file to be locked
325 * @cmd: lock command
326 * @fl: file lock structure
327 *
328 */
329
330static int v9fs_file_flock_dotl(struct file *filp, int cmd,
331 struct file_lock *fl)
332{
333 struct inode *inode = file_inode(filp);
334 int ret = -ENOLCK;
335
336 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
337 filp, cmd, fl, filp);
338
339 if (!(fl->fl_flags & FL_FLOCK))
340 goto out_err;
341
342 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
343 filemap_write_and_wait(inode->i_mapping);
344 invalidate_mapping_pages(&inode->i_data, 0, -1);
345 }
346 /* Convert flock to posix lock */
347 fl->fl_flags |= FL_POSIX;
348 fl->fl_flags ^= FL_FLOCK;
349
350 if (IS_SETLK(cmd) | IS_SETLKW(cmd))
351 ret = v9fs_file_do_lock(filp, cmd, fl);
352 else
353 ret = -EINVAL;
354out_err:
355 return ret;
356}
357
358/**
359 * v9fs_file_read_iter - read from a file
360 * @iocb: The operation parameters
361 * @to: The buffer to read into
362 *
363 */
364static ssize_t
365v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
366{
367 struct p9_fid *fid = iocb->ki_filp->private_data;
368 int ret, err = 0;
369
370 p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
371 iov_iter_count(to), iocb->ki_pos);
372
373 if (iocb->ki_filp->f_flags & O_NONBLOCK)
374 ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
375 else
376 ret = p9_client_read(fid, iocb->ki_pos, to, &err);
377 if (!ret)
378 return err;
379
380 iocb->ki_pos += ret;
381 return ret;
382}
383
/**
 * v9fs_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 * Writes directly to the server, then invalidates any now-stale cached
 * pages covering the written range and updates the cached inode size.
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;

		/*
		 * Drop cached pages that overlap the range just written so
		 * later reads fetch the server's copy.
		 */
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		/* Writing past EOF extends the cached size accounting. */
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	/* Nothing written: propagate the error from the client layer. */
	return err;
}
428
429static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
430 int datasync)
431{
432 struct p9_fid *fid;
433 struct inode *inode = filp->f_mapping->host;
434 struct p9_wstat wstat;
435 int retval;
436
437 retval = file_write_and_wait_range(filp, start, end);
438 if (retval)
439 return retval;
440
441 inode_lock(inode);
442 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
443
444 fid = filp->private_data;
445 v9fs_blank_wstat(&wstat);
446
447 retval = p9_client_wstat(fid, &wstat);
448 inode_unlock(inode);
449
450 return retval;
451}
452
453int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
454 int datasync)
455{
456 struct p9_fid *fid;
457 struct inode *inode = filp->f_mapping->host;
458 int retval;
459
460 retval = file_write_and_wait_range(filp, start, end);
461 if (retval)
462 return retval;
463
464 inode_lock(inode);
465 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
466
467 fid = filp->private_data;
468
469 retval = p9_client_fsync(fid, datasync);
470 inode_unlock(inode);
471
472 return retval;
473}
474
475static int
476v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
477{
478 int retval;
479
480
481 retval = generic_file_mmap(filp, vma);
482 if (!retval)
483 vma->vm_ops = &v9fs_file_vm_ops;
484
485 return retval;
486}
487
/*
 * v9fs_mmap_file_mmap - mmap for mmap-mode mounts.  For shared writable
 * mappings a writeback fid is set up first so dirty pages can still be
 * written back after the file is unlinked.
 */
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	/* v_mutex serializes creation of the per-inode writeback fid. */
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_SHARED) &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during mmap instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}
525
/*
 * v9fs_vm_page_mkwrite - make a mapped folio writable on first write fault.
 * Waits for any in-flight fscache write, then locks the folio and verifies
 * it still belongs to this mapping before letting the write proceed.
 */
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct folio *folio = page_folio(vmf->page);
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
		 folio, (unsigned long)filp->private_data);

	v9inode = V9FS_I(inode);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified. We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		return VM_FAULT_NOPAGE;
#endif

	/* Update file times before taking page lock */
	file_update_time(filp);

	/* A writable mapping must have set up a writeback fid at mmap time. */
	BUG_ON(!v9inode->writeback_fid);
	if (folio_lock_killable(folio) < 0)
		return VM_FAULT_RETRY;
	/* The folio may have been truncated/migrated while we waited. */
	if (folio_mapping(folio) != inode->i_mapping)
		goto out_unlock;
	folio_wait_stable(folio);

	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return VM_FAULT_NOPAGE;
}
564
/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: The operation parameters
 * @to: The buffer to read into
 *
 * Thin wrapper around v9fs_file_read_iter() for mmap-mode mounts.
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}
577
/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: The operation parameters
 * @from: The data to write
 *
 * Thin wrapper around v9fs_file_write_iter() for mmap-mode mounts.
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}
593
594static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
595{
596 struct inode *inode;
597
598 struct writeback_control wbc = {
599 .nr_to_write = LONG_MAX,
600 .sync_mode = WB_SYNC_ALL,
601 .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
602 /* absolute end, byte at end included */
603 .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
604 (vma->vm_end - vma->vm_start - 1),
605 };
606
607 if (!(vma->vm_flags & VM_SHARED))
608 return;
609
610 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
611
612 inode = file_inode(vma->vm_file);
613 filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
614}
615
616
/* VM ops for cached mounts: generic faults plus v9fs page_mkwrite. */
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

/* VM ops for mmap-mode mounts: also flush the range on VMA close. */
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
629
630
/* Cached (cache=loose/fscache) files, legacy 9P2000(.u) dialect. */
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* Cached files, 9P2000.L dialect (adds flock and TFSYNC-based fsync). */
const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

/* Uncached files, legacy dialect: I/O goes straight to the server. */
const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* Uncached files, 9P2000.L dialect. */
const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

/* cache=mmap mounts, legacy dialect: direct I/O but writable mmap. */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* cache=mmap mounts, 9P2000.L dialect. */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/fs/9p/vfs_file.c
4 *
 * This file contains vfs file ops for 9P2000.
6 *
7 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
8 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
9 */
10
11#include <linux/module.h>
12#include <linux/errno.h>
13#include <linux/fs.h>
14#include <linux/sched.h>
15#include <linux/file.h>
16#include <linux/stat.h>
17#include <linux/string.h>
18#include <linux/inet.h>
19#include <linux/list.h>
20#include <linux/pagemap.h>
21#include <linux/utsname.h>
22#include <linux/uaccess.h>
23#include <linux/idr.h>
24#include <linux/uio.h>
25#include <linux/slab.h>
26#include <net/9p/9p.h>
27#include <net/9p/client.h>
28
29#include "v9fs.h"
30#include "v9fs_vfs.h"
31#include "fid.h"
32#include "cache.h"
33
34static const struct vm_operations_struct v9fs_file_vm_ops;
35static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
36
37/**
38 * v9fs_file_open - open a file (or directory)
39 * @inode: inode to be opened
40 * @file: file being opened
41 *
42 */
43
44int v9fs_file_open(struct inode *inode, struct file *file)
45{
46 int err;
47 struct v9fs_inode *v9inode;
48 struct v9fs_session_info *v9ses;
49 struct p9_fid *fid, *writeback_fid;
50 int omode;
51
52 p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
53 v9inode = V9FS_I(inode);
54 v9ses = v9fs_inode2v9ses(inode);
55 if (v9fs_proto_dotl(v9ses))
56 omode = v9fs_open_to_dotl_flags(file->f_flags);
57 else
58 omode = v9fs_uflags2omode(file->f_flags,
59 v9fs_proto_dotu(v9ses));
60 fid = file->private_data;
61 if (!fid) {
62 fid = v9fs_fid_clone(file_dentry(file));
63 if (IS_ERR(fid))
64 return PTR_ERR(fid);
65
66 err = p9_client_open(fid, omode);
67 if (err < 0) {
68 p9_client_clunk(fid);
69 return err;
70 }
71 if ((file->f_flags & O_APPEND) &&
72 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
73 generic_file_llseek(file, 0, SEEK_END);
74 }
75
76 file->private_data = fid;
77 mutex_lock(&v9inode->v_mutex);
78 if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
79 !v9inode->writeback_fid &&
80 ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
81 /*
82 * clone a fid and add it to writeback_fid
83 * we do it during open time instead of
84 * page dirty time via write_begin/page_mkwrite
85 * because we want write after unlink usecase
86 * to work.
87 */
88 writeback_fid = v9fs_writeback_fid(file_dentry(file));
89 if (IS_ERR(writeback_fid)) {
90 err = PTR_ERR(writeback_fid);
91 mutex_unlock(&v9inode->v_mutex);
92 goto out_error;
93 }
94 v9inode->writeback_fid = (void *) writeback_fid;
95 }
96 mutex_unlock(&v9inode->v_mutex);
97 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
98 v9fs_cache_inode_set_cookie(inode, file);
99 v9fs_open_fid_add(inode, fid);
100 return 0;
101out_error:
102 p9_client_clunk(file->private_data);
103 file->private_data = NULL;
104 return err;
105}
106
107/**
108 * v9fs_file_lock - lock a file (or directory)
109 * @filp: file to be locked
110 * @cmd: lock command
111 * @fl: file lock structure
112 *
113 * Bugs: this looks like a local only lock, we should extend into 9P
114 * by using open exclusive
115 */
116
117static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
118{
119 int res = 0;
120 struct inode *inode = file_inode(filp);
121
122 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
123
124 /* No mandatory locks */
125 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
126 return -ENOLCK;
127
128 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
129 filemap_write_and_wait(inode->i_mapping);
130 invalidate_mapping_pages(&inode->i_data, 0, -1);
131 }
132
133 return res;
134}
135
136static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
137{
138 struct p9_flock flock;
139 struct p9_fid *fid;
140 uint8_t status = P9_LOCK_ERROR;
141 int res = 0;
142 unsigned char fl_type;
143 struct v9fs_session_info *v9ses;
144
145 fid = filp->private_data;
146 BUG_ON(fid == NULL);
147
148 if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
149 BUG();
150
151 res = locks_lock_file_wait(filp, fl);
152 if (res < 0)
153 goto out;
154
155 /* convert posix lock to p9 tlock args */
156 memset(&flock, 0, sizeof(flock));
157 /* map the lock type */
158 switch (fl->fl_type) {
159 case F_RDLCK:
160 flock.type = P9_LOCK_TYPE_RDLCK;
161 break;
162 case F_WRLCK:
163 flock.type = P9_LOCK_TYPE_WRLCK;
164 break;
165 case F_UNLCK:
166 flock.type = P9_LOCK_TYPE_UNLCK;
167 break;
168 }
169 flock.start = fl->fl_start;
170 if (fl->fl_end == OFFSET_MAX)
171 flock.length = 0;
172 else
173 flock.length = fl->fl_end - fl->fl_start + 1;
174 flock.proc_id = fl->fl_pid;
175 flock.client_id = fid->clnt->name;
176 if (IS_SETLKW(cmd))
177 flock.flags = P9_LOCK_FLAGS_BLOCK;
178
179 v9ses = v9fs_inode2v9ses(file_inode(filp));
180
181 /*
182 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
183 * for lock request, keep on trying
184 */
185 for (;;) {
186 res = p9_client_lock_dotl(fid, &flock, &status);
187 if (res < 0)
188 goto out_unlock;
189
190 if (status != P9_LOCK_BLOCKED)
191 break;
192 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
193 break;
194 if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
195 != 0)
196 break;
197 /*
198 * p9_client_lock_dotl overwrites flock.client_id with the
199 * server message, free and reuse the client name
200 */
201 if (flock.client_id != fid->clnt->name) {
202 kfree(flock.client_id);
203 flock.client_id = fid->clnt->name;
204 }
205 }
206
207 /* map 9p status to VFS status */
208 switch (status) {
209 case P9_LOCK_SUCCESS:
210 res = 0;
211 break;
212 case P9_LOCK_BLOCKED:
213 res = -EAGAIN;
214 break;
215 default:
216 WARN_ONCE(1, "unknown lock status code: %d\n", status);
217 fallthrough;
218 case P9_LOCK_ERROR:
219 case P9_LOCK_GRACE:
220 res = -ENOLCK;
221 break;
222 }
223
224out_unlock:
225 /*
226 * incase server returned error for lock request, revert
227 * it locally
228 */
229 if (res < 0 && fl->fl_type != F_UNLCK) {
230 fl_type = fl->fl_type;
231 fl->fl_type = F_UNLCK;
232 /* Even if this fails we want to return the remote error */
233 locks_lock_file_wait(filp, fl);
234 fl->fl_type = fl_type;
235 }
236 if (flock.client_id != fid->clnt->name)
237 kfree(flock.client_id);
238out:
239 return res;
240}
241
242static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
243{
244 struct p9_getlock glock;
245 struct p9_fid *fid;
246 int res = 0;
247
248 fid = filp->private_data;
249 BUG_ON(fid == NULL);
250
251 posix_test_lock(filp, fl);
252 /*
253 * if we have a conflicting lock locally, no need to validate
254 * with server
255 */
256 if (fl->fl_type != F_UNLCK)
257 return res;
258
259 /* convert posix lock to p9 tgetlock args */
260 memset(&glock, 0, sizeof(glock));
261 glock.type = P9_LOCK_TYPE_UNLCK;
262 glock.start = fl->fl_start;
263 if (fl->fl_end == OFFSET_MAX)
264 glock.length = 0;
265 else
266 glock.length = fl->fl_end - fl->fl_start + 1;
267 glock.proc_id = fl->fl_pid;
268 glock.client_id = fid->clnt->name;
269
270 res = p9_client_getlock_dotl(fid, &glock);
271 if (res < 0)
272 goto out;
273 /* map 9p lock type to os lock type */
274 switch (glock.type) {
275 case P9_LOCK_TYPE_RDLCK:
276 fl->fl_type = F_RDLCK;
277 break;
278 case P9_LOCK_TYPE_WRLCK:
279 fl->fl_type = F_WRLCK;
280 break;
281 case P9_LOCK_TYPE_UNLCK:
282 fl->fl_type = F_UNLCK;
283 break;
284 }
285 if (glock.type != P9_LOCK_TYPE_UNLCK) {
286 fl->fl_start = glock.start;
287 if (glock.length == 0)
288 fl->fl_end = OFFSET_MAX;
289 else
290 fl->fl_end = glock.start + glock.length - 1;
291 fl->fl_pid = -glock.proc_id;
292 }
293out:
294 if (glock.client_id != fid->clnt->name)
295 kfree(glock.client_id);
296 return res;
297}
298
299/**
300 * v9fs_file_lock_dotl - lock a file (or directory)
301 * @filp: file to be locked
302 * @cmd: lock command
303 * @fl: file lock structure
304 *
305 */
306
307static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
308{
309 struct inode *inode = file_inode(filp);
310 int ret = -ENOLCK;
311
312 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
313 filp, cmd, fl, filp);
314
315 /* No mandatory locks */
316 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
317 goto out_err;
318
319 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
320 filemap_write_and_wait(inode->i_mapping);
321 invalidate_mapping_pages(&inode->i_data, 0, -1);
322 }
323
324 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
325 ret = v9fs_file_do_lock(filp, cmd, fl);
326 else if (IS_GETLK(cmd))
327 ret = v9fs_file_getlock(filp, fl);
328 else
329 ret = -EINVAL;
330out_err:
331 return ret;
332}
333
334/**
335 * v9fs_file_flock_dotl - lock a file
336 * @filp: file to be locked
337 * @cmd: lock command
338 * @fl: file lock structure
339 *
340 */
341
342static int v9fs_file_flock_dotl(struct file *filp, int cmd,
343 struct file_lock *fl)
344{
345 struct inode *inode = file_inode(filp);
346 int ret = -ENOLCK;
347
348 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
349 filp, cmd, fl, filp);
350
351 /* No mandatory locks */
352 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
353 goto out_err;
354
355 if (!(fl->fl_flags & FL_FLOCK))
356 goto out_err;
357
358 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
359 filemap_write_and_wait(inode->i_mapping);
360 invalidate_mapping_pages(&inode->i_data, 0, -1);
361 }
362 /* Convert flock to posix lock */
363 fl->fl_flags |= FL_POSIX;
364 fl->fl_flags ^= FL_FLOCK;
365
366 if (IS_SETLK(cmd) | IS_SETLKW(cmd))
367 ret = v9fs_file_do_lock(filp, cmd, fl);
368 else
369 ret = -EINVAL;
370out_err:
371 return ret;
372}
373
374/**
375 * v9fs_file_read - read from a file
376 * @filp: file pointer to read
377 * @udata: user data buffer to read data into
378 * @count: size of buffer
379 * @offset: offset at which to read data
380 *
381 */
382
383static ssize_t
384v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
385{
386 struct p9_fid *fid = iocb->ki_filp->private_data;
387 int ret, err = 0;
388
389 p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
390 iov_iter_count(to), iocb->ki_pos);
391
392 if (iocb->ki_filp->f_flags & O_NONBLOCK)
393 ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
394 else
395 ret = p9_client_read(fid, iocb->ki_pos, to, &err);
396 if (!ret)
397 return err;
398
399 iocb->ki_pos += ret;
400 return ret;
401}
402
403/**
404 * v9fs_file_write - write to a file
405 * @filp: file pointer to write
406 * @data: data buffer to write data from
407 * @count: size of buffer
408 * @offset: offset at which to write data
409 *
410 */
411static ssize_t
412v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
413{
414 struct file *file = iocb->ki_filp;
415 ssize_t retval;
416 loff_t origin;
417 int err = 0;
418
419 retval = generic_write_checks(iocb, from);
420 if (retval <= 0)
421 return retval;
422
423 origin = iocb->ki_pos;
424 retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
425 if (retval > 0) {
426 struct inode *inode = file_inode(file);
427 loff_t i_size;
428 unsigned long pg_start, pg_end;
429 pg_start = origin >> PAGE_SHIFT;
430 pg_end = (origin + retval - 1) >> PAGE_SHIFT;
431 if (inode->i_mapping && inode->i_mapping->nrpages)
432 invalidate_inode_pages2_range(inode->i_mapping,
433 pg_start, pg_end);
434 iocb->ki_pos += retval;
435 i_size = i_size_read(inode);
436 if (iocb->ki_pos > i_size) {
437 inode_add_bytes(inode, iocb->ki_pos - i_size);
438 /*
439 * Need to serialize against i_size_write() in
440 * v9fs_stat2inode()
441 */
442 v9fs_i_size_write(inode, iocb->ki_pos);
443 }
444 return retval;
445 }
446 return err;
447}
448
449static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
450 int datasync)
451{
452 struct p9_fid *fid;
453 struct inode *inode = filp->f_mapping->host;
454 struct p9_wstat wstat;
455 int retval;
456
457 retval = file_write_and_wait_range(filp, start, end);
458 if (retval)
459 return retval;
460
461 inode_lock(inode);
462 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
463
464 fid = filp->private_data;
465 v9fs_blank_wstat(&wstat);
466
467 retval = p9_client_wstat(fid, &wstat);
468 inode_unlock(inode);
469
470 return retval;
471}
472
473int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
474 int datasync)
475{
476 struct p9_fid *fid;
477 struct inode *inode = filp->f_mapping->host;
478 int retval;
479
480 retval = file_write_and_wait_range(filp, start, end);
481 if (retval)
482 return retval;
483
484 inode_lock(inode);
485 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
486
487 fid = filp->private_data;
488
489 retval = p9_client_fsync(fid, datasync);
490 inode_unlock(inode);
491
492 return retval;
493}
494
495static int
496v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
497{
498 int retval;
499
500
501 retval = generic_file_mmap(filp, vma);
502 if (!retval)
503 vma->vm_ops = &v9fs_file_vm_ops;
504
505 return retval;
506}
507
508static int
509v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
510{
511 int retval;
512 struct inode *inode;
513 struct v9fs_inode *v9inode;
514 struct p9_fid *fid;
515
516 inode = file_inode(filp);
517 v9inode = V9FS_I(inode);
518 mutex_lock(&v9inode->v_mutex);
519 if (!v9inode->writeback_fid &&
520 (vma->vm_flags & VM_SHARED) &&
521 (vma->vm_flags & VM_WRITE)) {
522 /*
523 * clone a fid and add it to writeback_fid
524 * we do it during mmap instead of
525 * page dirty time via write_begin/page_mkwrite
526 * because we want write after unlink usecase
527 * to work.
528 */
529 fid = v9fs_writeback_fid(file_dentry(filp));
530 if (IS_ERR(fid)) {
531 retval = PTR_ERR(fid);
532 mutex_unlock(&v9inode->v_mutex);
533 return retval;
534 }
535 v9inode->writeback_fid = (void *) fid;
536 }
537 mutex_unlock(&v9inode->v_mutex);
538
539 retval = generic_file_mmap(filp, vma);
540 if (!retval)
541 vma->vm_ops = &v9fs_mmap_file_vm_ops;
542
543 return retval;
544}
545
/*
 * v9fs_vm_page_mkwrite - make a page writable on a write fault
 * @vmf: fault descriptor carrying the page and the faulting VMA
 *
 * Called when a previously read-only page of a writable mapping takes
 * a write fault.  Waits for any in-flight fscache store on the page,
 * then takes the page lock and revalidates that the page still belongs
 * to this inode's mapping (it may have been truncated/invalidated
 * while we slept).  Returns with the page locked (VM_FAULT_LOCKED) on
 * success, or VM_FAULT_NOPAGE to retry the fault if the page was
 * detached.
 */
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	/*
	 * A writable mapping must have set up a writeback fid in
	 * v9fs_mmap_file_mmap(); its absence is a programming error.
	 */
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	/* page may have been truncated away while we waited above */
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}
575
576/**
577 * v9fs_mmap_file_read - read from a file
578 * @filp: file pointer to read
579 * @data: user data buffer to read data into
580 * @count: size of buffer
581 * @offset: offset at which to read data
582 *
583 */
584static ssize_t
585v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
586{
587 /* TODO: Check if there are dirty pages */
588 return v9fs_file_read_iter(iocb, to);
589}
590
591/**
592 * v9fs_mmap_file_write - write to a file
593 * @filp: file pointer to write
594 * @data: data buffer to write data from
595 * @count: size of buffer
596 * @offset: offset at which to write data
597 *
598 */
599static ssize_t
600v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
601{
602 /*
603 * TODO: invalidate mmaps on filp's inode between
604 * offset and offset+count
605 */
606 return v9fs_file_write_iter(iocb, from);
607}
608
609static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
610{
611 struct inode *inode;
612
613 struct writeback_control wbc = {
614 .nr_to_write = LONG_MAX,
615 .sync_mode = WB_SYNC_ALL,
616 .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
617 /* absolute end, byte at end included */
618 .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
619 (vma->vm_end - vma->vm_start - 1),
620 };
621
622 if (!(vma->vm_flags & VM_SHARED))
623 return;
624
625 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
626
627 inode = file_inode(vma->vm_file);
628
629 if (!mapping_can_writeback(inode->i_mapping))
630 wbc.nr_to_write = 0;
631
632 might_sleep();
633 sync_inode(inode, &wbc);
634}
635
636
/*
 * VMA operations for v9fs_file_mmap(): generic filemap fault handling,
 * with write faults routed through v9fs_vm_page_mkwrite().
 */
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
642
/*
 * VMA operations for v9fs_mmap_file_mmap(): same fault handling as
 * v9fs_file_vm_ops, plus a .close hook (v9fs_mmap_vm_close) that
 * flushes shared mappings back when the VMA is torn down.
 */
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
649
650
/*
 * File operations for cached (page-cache backed) files, legacy
 * 9P2000/9P2000.u dialects: generic read/write iterators over the page
 * cache, wstat-based fsync.
 */
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
663
/*
 * File operations for cached files, 9P2000.L dialect: as above but
 * with the .L lock/flock handlers and native fsync.
 */
const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
677
/*
 * File operations for uncached files, legacy 9P2000/9P2000.u dialects:
 * direct-to-wire read/write iterators; mmap is read-only since there
 * is no page cache to write back through.
 */
const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
690
/*
 * File operations for uncached files, 9P2000.L dialect: direct
 * read/write with the .L lock/flock handlers and native fsync.
 */
const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
704
/*
 * File operations for cache=mmap files, legacy dialects: direct
 * read/write via the mmap-aware wrappers, with writable shared
 * mappings supported through v9fs_mmap_file_mmap().
 */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
717
/*
 * File operations for cache=mmap files, 9P2000.L dialect: as above but
 * with the .L lock/flock handlers and native fsync.
 */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};