Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * This file contians vfs file ops for 9P2000.
4 *
5 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
6 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
7 */
8
9#include <linux/module.h>
10#include <linux/errno.h>
11#include <linux/fs.h>
12#include <linux/filelock.h>
13#include <linux/sched.h>
14#include <linux/file.h>
15#include <linux/stat.h>
16#include <linux/string.h>
17#include <linux/list.h>
18#include <linux/pagemap.h>
19#include <linux/utsname.h>
20#include <linux/uaccess.h>
21#include <linux/uio.h>
22#include <linux/slab.h>
23#include <net/9p/9p.h>
24#include <net/9p/client.h>
25
26#include "v9fs.h"
27#include "v9fs_vfs.h"
28#include "fid.h"
29#include "cache.h"
30
31static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
32
33/**
34 * v9fs_file_open - open a file (or directory)
35 * @inode: inode to be opened
36 * @file: file being opened
37 *
38 */
39
40int v9fs_file_open(struct inode *inode, struct file *file)
41{
42 int err;
43 struct v9fs_session_info *v9ses;
44 struct p9_fid *fid;
45 int omode;
46
47 p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
48 v9ses = v9fs_inode2v9ses(inode);
49 if (v9fs_proto_dotl(v9ses))
50 omode = v9fs_open_to_dotl_flags(file->f_flags);
51 else
52 omode = v9fs_uflags2omode(file->f_flags,
53 v9fs_proto_dotu(v9ses));
54 fid = file->private_data;
55 if (!fid) {
56 fid = v9fs_fid_clone(file_dentry(file));
57 if (IS_ERR(fid))
58 return PTR_ERR(fid);
59
60 if ((v9ses->cache & CACHE_WRITEBACK) && (omode & P9_OWRITE)) {
61 int writeback_omode = (omode & ~P9_OWRITE) | P9_ORDWR;
62
63 p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, try opening O_RDWR\n");
64 err = p9_client_open(fid, writeback_omode);
65 if (err < 0) {
66 p9_debug(P9_DEBUG_CACHE, "could not open O_RDWR, disabling caches\n");
67 err = p9_client_open(fid, omode);
68 fid->mode |= P9L_DIRECT;
69 }
70 } else {
71 err = p9_client_open(fid, omode);
72 }
73 if (err < 0) {
74 p9_fid_put(fid);
75 return err;
76 }
77 if ((file->f_flags & O_APPEND) &&
78 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
79 generic_file_llseek(file, 0, SEEK_END);
80
81 file->private_data = fid;
82 }
83
84#ifdef CONFIG_9P_FSCACHE
85 if (v9ses->cache & CACHE_FSCACHE)
86 fscache_use_cookie(v9fs_inode_cookie(V9FS_I(inode)),
87 file->f_mode & FMODE_WRITE);
88#endif
89 v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
90 v9fs_open_fid_add(inode, &fid);
91 return 0;
92}
93
94/**
95 * v9fs_file_lock - lock a file (or directory)
96 * @filp: file to be locked
97 * @cmd: lock command
98 * @fl: file lock structure
99 *
100 * Bugs: this looks like a local only lock, we should extend into 9P
101 * by using open exclusive
102 */
103
104static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
105{
106 struct inode *inode = file_inode(filp);
107
108 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
109
110 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
111 filemap_write_and_wait(inode->i_mapping);
112 invalidate_mapping_pages(&inode->i_data, 0, -1);
113 }
114
115 return 0;
116}
117
118static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
119{
120 struct p9_flock flock;
121 struct p9_fid *fid;
122 uint8_t status = P9_LOCK_ERROR;
123 int res = 0;
124 struct v9fs_session_info *v9ses;
125
126 fid = filp->private_data;
127 BUG_ON(fid == NULL);
128
129 BUG_ON((fl->c.flc_flags & FL_POSIX) != FL_POSIX);
130
131 res = locks_lock_file_wait(filp, fl);
132 if (res < 0)
133 goto out;
134
135 /* convert posix lock to p9 tlock args */
136 memset(&flock, 0, sizeof(flock));
137 /* map the lock type */
138 switch (fl->c.flc_type) {
139 case F_RDLCK:
140 flock.type = P9_LOCK_TYPE_RDLCK;
141 break;
142 case F_WRLCK:
143 flock.type = P9_LOCK_TYPE_WRLCK;
144 break;
145 case F_UNLCK:
146 flock.type = P9_LOCK_TYPE_UNLCK;
147 break;
148 }
149 flock.start = fl->fl_start;
150 if (fl->fl_end == OFFSET_MAX)
151 flock.length = 0;
152 else
153 flock.length = fl->fl_end - fl->fl_start + 1;
154 flock.proc_id = fl->c.flc_pid;
155 flock.client_id = fid->clnt->name;
156 if (IS_SETLKW(cmd))
157 flock.flags = P9_LOCK_FLAGS_BLOCK;
158
159 v9ses = v9fs_inode2v9ses(file_inode(filp));
160
161 /*
162 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
163 * for lock request, keep on trying
164 */
165 for (;;) {
166 res = p9_client_lock_dotl(fid, &flock, &status);
167 if (res < 0)
168 goto out_unlock;
169
170 if (status != P9_LOCK_BLOCKED)
171 break;
172 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
173 break;
174 if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
175 != 0)
176 break;
177 /*
178 * p9_client_lock_dotl overwrites flock.client_id with the
179 * server message, free and reuse the client name
180 */
181 if (flock.client_id != fid->clnt->name) {
182 kfree(flock.client_id);
183 flock.client_id = fid->clnt->name;
184 }
185 }
186
187 /* map 9p status to VFS status */
188 switch (status) {
189 case P9_LOCK_SUCCESS:
190 res = 0;
191 break;
192 case P9_LOCK_BLOCKED:
193 res = -EAGAIN;
194 break;
195 default:
196 WARN_ONCE(1, "unknown lock status code: %d\n", status);
197 fallthrough;
198 case P9_LOCK_ERROR:
199 case P9_LOCK_GRACE:
200 res = -ENOLCK;
201 break;
202 }
203
204out_unlock:
205 /*
206 * incase server returned error for lock request, revert
207 * it locally
208 */
209 if (res < 0 && fl->c.flc_type != F_UNLCK) {
210 unsigned char type = fl->c.flc_type;
211
212 fl->c.flc_type = F_UNLCK;
213 /* Even if this fails we want to return the remote error */
214 locks_lock_file_wait(filp, fl);
215 fl->c.flc_type = type;
216 }
217 if (flock.client_id != fid->clnt->name)
218 kfree(flock.client_id);
219out:
220 return res;
221}
222
223static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
224{
225 struct p9_getlock glock;
226 struct p9_fid *fid;
227 int res = 0;
228
229 fid = filp->private_data;
230 BUG_ON(fid == NULL);
231
232 posix_test_lock(filp, fl);
233 /*
234 * if we have a conflicting lock locally, no need to validate
235 * with server
236 */
237 if (fl->c.flc_type != F_UNLCK)
238 return res;
239
240 /* convert posix lock to p9 tgetlock args */
241 memset(&glock, 0, sizeof(glock));
242 glock.type = P9_LOCK_TYPE_UNLCK;
243 glock.start = fl->fl_start;
244 if (fl->fl_end == OFFSET_MAX)
245 glock.length = 0;
246 else
247 glock.length = fl->fl_end - fl->fl_start + 1;
248 glock.proc_id = fl->c.flc_pid;
249 glock.client_id = fid->clnt->name;
250
251 res = p9_client_getlock_dotl(fid, &glock);
252 if (res < 0)
253 goto out;
254 /* map 9p lock type to os lock type */
255 switch (glock.type) {
256 case P9_LOCK_TYPE_RDLCK:
257 fl->c.flc_type = F_RDLCK;
258 break;
259 case P9_LOCK_TYPE_WRLCK:
260 fl->c.flc_type = F_WRLCK;
261 break;
262 case P9_LOCK_TYPE_UNLCK:
263 fl->c.flc_type = F_UNLCK;
264 break;
265 }
266 if (glock.type != P9_LOCK_TYPE_UNLCK) {
267 fl->fl_start = glock.start;
268 if (glock.length == 0)
269 fl->fl_end = OFFSET_MAX;
270 else
271 fl->fl_end = glock.start + glock.length - 1;
272 fl->c.flc_pid = -glock.proc_id;
273 }
274out:
275 if (glock.client_id != fid->clnt->name)
276 kfree(glock.client_id);
277 return res;
278}
279
280/**
281 * v9fs_file_lock_dotl - lock a file (or directory)
282 * @filp: file to be locked
283 * @cmd: lock command
284 * @fl: file lock structure
285 *
286 */
287
288static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
289{
290 struct inode *inode = file_inode(filp);
291 int ret = -ENOLCK;
292
293 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
294 filp, cmd, fl, filp);
295
296 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
297 filemap_write_and_wait(inode->i_mapping);
298 invalidate_mapping_pages(&inode->i_data, 0, -1);
299 }
300
301 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
302 ret = v9fs_file_do_lock(filp, cmd, fl);
303 else if (IS_GETLK(cmd))
304 ret = v9fs_file_getlock(filp, fl);
305 else
306 ret = -EINVAL;
307 return ret;
308}
309
310/**
311 * v9fs_file_flock_dotl - lock a file
312 * @filp: file to be locked
313 * @cmd: lock command
314 * @fl: file lock structure
315 *
316 */
317
318static int v9fs_file_flock_dotl(struct file *filp, int cmd,
319 struct file_lock *fl)
320{
321 struct inode *inode = file_inode(filp);
322 int ret = -ENOLCK;
323
324 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
325 filp, cmd, fl, filp);
326
327 if (!(fl->c.flc_flags & FL_FLOCK))
328 goto out_err;
329
330 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->c.flc_type != F_UNLCK) {
331 filemap_write_and_wait(inode->i_mapping);
332 invalidate_mapping_pages(&inode->i_data, 0, -1);
333 }
334 /* Convert flock to posix lock */
335 fl->c.flc_flags |= FL_POSIX;
336 fl->c.flc_flags ^= FL_FLOCK;
337
338 if (IS_SETLK(cmd) | IS_SETLKW(cmd))
339 ret = v9fs_file_do_lock(filp, cmd, fl);
340 else
341 ret = -EINVAL;
342out_err:
343 return ret;
344}
345
346/**
347 * v9fs_file_read_iter - read from a file
348 * @iocb: The operation parameters
349 * @to: The buffer to read into
350 *
351 */
352static ssize_t
353v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
354{
355 struct p9_fid *fid = iocb->ki_filp->private_data;
356
357 p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
358 fid->fid, iov_iter_count(to), iocb->ki_pos);
359
360 if (fid->mode & P9L_DIRECT)
361 return netfs_unbuffered_read_iter(iocb, to);
362
363 p9_debug(P9_DEBUG_VFS, "(cached)\n");
364 return netfs_file_read_iter(iocb, to);
365}
366
367/*
368 * v9fs_file_splice_read - splice-read from a file
369 * @in: The 9p file to read from
370 * @ppos: Where to find/update the file position
371 * @pipe: The pipe to splice into
372 * @len: The maximum amount of data to splice
373 * @flags: SPLICE_F_* flags
374 */
375static ssize_t v9fs_file_splice_read(struct file *in, loff_t *ppos,
376 struct pipe_inode_info *pipe,
377 size_t len, unsigned int flags)
378{
379 struct p9_fid *fid = in->private_data;
380
381 p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
382 fid->fid, len, *ppos);
383
384 if (fid->mode & P9L_DIRECT)
385 return copy_splice_read(in, ppos, pipe, len, flags);
386 return filemap_splice_read(in, ppos, pipe, len, flags);
387}
388
389/**
390 * v9fs_file_write_iter - write to a file
391 * @iocb: The operation parameters
392 * @from: The data to write
393 *
394 */
395static ssize_t
396v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
397{
398 struct file *file = iocb->ki_filp;
399 struct p9_fid *fid = file->private_data;
400
401 p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid);
402
403 if (fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))
404 return netfs_unbuffered_write_iter(iocb, from);
405
406 p9_debug(P9_DEBUG_CACHE, "(cached)\n");
407 return netfs_file_write_iter(iocb, from);
408}
409
410static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
411 int datasync)
412{
413 struct p9_fid *fid;
414 struct inode *inode = filp->f_mapping->host;
415 struct p9_wstat wstat;
416 int retval;
417
418 retval = file_write_and_wait_range(filp, start, end);
419 if (retval)
420 return retval;
421
422 inode_lock(inode);
423 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
424
425 fid = filp->private_data;
426 v9fs_blank_wstat(&wstat);
427
428 retval = p9_client_wstat(fid, &wstat);
429 inode_unlock(inode);
430
431 return retval;
432}
433
434int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
435 int datasync)
436{
437 struct p9_fid *fid;
438 struct inode *inode = filp->f_mapping->host;
439 int retval;
440
441 retval = file_write_and_wait_range(filp, start, end);
442 if (retval)
443 return retval;
444
445 inode_lock(inode);
446 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
447
448 fid = filp->private_data;
449
450 retval = p9_client_fsync(fid, datasync);
451 inode_unlock(inode);
452
453 return retval;
454}
455
456static int
457v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
458{
459 int retval;
460 struct inode *inode = file_inode(filp);
461 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
462
463 p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp);
464
465 if (!(v9ses->cache & CACHE_WRITEBACK)) {
466 p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
467 return generic_file_readonly_mmap(filp, vma);
468 }
469
470 retval = generic_file_mmap(filp, vma);
471 if (!retval)
472 vma->vm_ops = &v9fs_mmap_file_vm_ops;
473
474 return retval;
475}
476
477static vm_fault_t
478v9fs_vm_page_mkwrite(struct vm_fault *vmf)
479{
480 return netfs_page_mkwrite(vmf, NULL);
481}
482
483static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
484{
485 struct inode *inode;
486
487 struct writeback_control wbc = {
488 .nr_to_write = LONG_MAX,
489 .sync_mode = WB_SYNC_ALL,
490 .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
491 /* absolute end, byte at end included */
492 .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
493 (vma->vm_end - vma->vm_start - 1),
494 };
495
496 if (!(vma->vm_flags & VM_SHARED))
497 return;
498
499 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
500
501 inode = file_inode(vma->vm_file);
502 filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
503}
504
505static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
506 .close = v9fs_mmap_vm_close,
507 .fault = filemap_fault,
508 .map_pages = filemap_map_pages,
509 .page_mkwrite = v9fs_vm_page_mkwrite,
510};
511
512const struct file_operations v9fs_file_operations = {
513 .llseek = generic_file_llseek,
514 .read_iter = v9fs_file_read_iter,
515 .write_iter = v9fs_file_write_iter,
516 .open = v9fs_file_open,
517 .release = v9fs_dir_release,
518 .lock = v9fs_file_lock,
519 .mmap = generic_file_readonly_mmap,
520 .splice_read = v9fs_file_splice_read,
521 .splice_write = iter_file_splice_write,
522 .fsync = v9fs_file_fsync,
523 .setlease = simple_nosetlease,
524};
525
526const struct file_operations v9fs_file_operations_dotl = {
527 .llseek = generic_file_llseek,
528 .read_iter = v9fs_file_read_iter,
529 .write_iter = v9fs_file_write_iter,
530 .open = v9fs_file_open,
531 .release = v9fs_dir_release,
532 .lock = v9fs_file_lock_dotl,
533 .flock = v9fs_file_flock_dotl,
534 .mmap = v9fs_file_mmap,
535 .splice_read = v9fs_file_splice_read,
536 .splice_write = iter_file_splice_write,
537 .fsync = v9fs_file_fsync_dotl,
538 .setlease = simple_nosetlease,
539};
1/*
2 * linux/fs/9p/vfs_file.c
3 *
4 * This file contians vfs file ops for 9P2000.
5 *
6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to:
20 * Free Software Foundation
21 * 51 Franklin Street, Fifth Floor
22 * Boston, MA 02111-1301 USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/errno.h>
28#include <linux/fs.h>
29#include <linux/sched.h>
30#include <linux/file.h>
31#include <linux/stat.h>
32#include <linux/string.h>
33#include <linux/inet.h>
34#include <linux/list.h>
35#include <linux/pagemap.h>
36#include <linux/utsname.h>
37#include <asm/uaccess.h>
38#include <linux/idr.h>
39#include <net/9p/9p.h>
40#include <net/9p/client.h>
41
42#include "v9fs.h"
43#include "v9fs_vfs.h"
44#include "fid.h"
45#include "cache.h"
46
47static const struct vm_operations_struct v9fs_file_vm_ops;
48static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
49
50/**
51 * v9fs_file_open - open a file (or directory)
52 * @inode: inode to be opened
53 * @file: file being opened
54 *
55 */
56
57int v9fs_file_open(struct inode *inode, struct file *file)
58{
59 int err;
60 struct v9fs_inode *v9inode;
61 struct v9fs_session_info *v9ses;
62 struct p9_fid *fid;
63 int omode;
64
65 p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
66 v9inode = V9FS_I(inode);
67 v9ses = v9fs_inode2v9ses(inode);
68 if (v9fs_proto_dotl(v9ses))
69 omode = v9fs_open_to_dotl_flags(file->f_flags);
70 else
71 omode = v9fs_uflags2omode(file->f_flags,
72 v9fs_proto_dotu(v9ses));
73 fid = file->private_data;
74 if (!fid) {
75 fid = v9fs_fid_clone(file->f_path.dentry);
76 if (IS_ERR(fid))
77 return PTR_ERR(fid);
78
79 err = p9_client_open(fid, omode);
80 if (err < 0) {
81 p9_client_clunk(fid);
82 return err;
83 }
84 if ((file->f_flags & O_APPEND) &&
85 (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
86 generic_file_llseek(file, 0, SEEK_END);
87 }
88
89 file->private_data = fid;
90 mutex_lock(&v9inode->v_mutex);
91 if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
92 !v9inode->writeback_fid &&
93 ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
94 /*
95 * clone a fid and add it to writeback_fid
96 * we do it during open time instead of
97 * page dirty time via write_begin/page_mkwrite
98 * because we want write after unlink usecase
99 * to work.
100 */
101 fid = v9fs_writeback_fid(file->f_path.dentry);
102 if (IS_ERR(fid)) {
103 err = PTR_ERR(fid);
104 mutex_unlock(&v9inode->v_mutex);
105 goto out_error;
106 }
107 v9inode->writeback_fid = (void *) fid;
108 }
109 mutex_unlock(&v9inode->v_mutex);
110 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
111 v9fs_cache_inode_set_cookie(inode, file);
112 return 0;
113out_error:
114 p9_client_clunk(file->private_data);
115 file->private_data = NULL;
116 return err;
117}
118
119/**
120 * v9fs_file_lock - lock a file (or directory)
121 * @filp: file to be locked
122 * @cmd: lock command
123 * @fl: file lock structure
124 *
125 * Bugs: this looks like a local only lock, we should extend into 9P
126 * by using open exclusive
127 */
128
129static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
130{
131 int res = 0;
132 struct inode *inode = file_inode(filp);
133
134 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
135
136 /* No mandatory locks */
137 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
138 return -ENOLCK;
139
140 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
141 filemap_write_and_wait(inode->i_mapping);
142 invalidate_mapping_pages(&inode->i_data, 0, -1);
143 }
144
145 return res;
146}
147
148static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
149{
150 struct p9_flock flock;
151 struct p9_fid *fid;
152 uint8_t status;
153 int res = 0;
154 unsigned char fl_type;
155
156 fid = filp->private_data;
157 BUG_ON(fid == NULL);
158
159 if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
160 BUG();
161
162 res = posix_lock_file_wait(filp, fl);
163 if (res < 0)
164 goto out;
165
166 /* convert posix lock to p9 tlock args */
167 memset(&flock, 0, sizeof(flock));
168 /* map the lock type */
169 switch (fl->fl_type) {
170 case F_RDLCK:
171 flock.type = P9_LOCK_TYPE_RDLCK;
172 break;
173 case F_WRLCK:
174 flock.type = P9_LOCK_TYPE_WRLCK;
175 break;
176 case F_UNLCK:
177 flock.type = P9_LOCK_TYPE_UNLCK;
178 break;
179 }
180 flock.start = fl->fl_start;
181 if (fl->fl_end == OFFSET_MAX)
182 flock.length = 0;
183 else
184 flock.length = fl->fl_end - fl->fl_start + 1;
185 flock.proc_id = fl->fl_pid;
186 flock.client_id = fid->clnt->name;
187 if (IS_SETLKW(cmd))
188 flock.flags = P9_LOCK_FLAGS_BLOCK;
189
190 /*
191 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
192 * for lock request, keep on trying
193 */
194 for (;;) {
195 res = p9_client_lock_dotl(fid, &flock, &status);
196 if (res < 0)
197 break;
198
199 if (status != P9_LOCK_BLOCKED)
200 break;
201 if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
202 break;
203 if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
204 break;
205 }
206
207 /* map 9p status to VFS status */
208 switch (status) {
209 case P9_LOCK_SUCCESS:
210 res = 0;
211 break;
212 case P9_LOCK_BLOCKED:
213 res = -EAGAIN;
214 break;
215 case P9_LOCK_ERROR:
216 case P9_LOCK_GRACE:
217 res = -ENOLCK;
218 break;
219 default:
220 BUG();
221 }
222
223 /*
224 * incase server returned error for lock request, revert
225 * it locally
226 */
227 if (res < 0 && fl->fl_type != F_UNLCK) {
228 fl_type = fl->fl_type;
229 fl->fl_type = F_UNLCK;
230 res = posix_lock_file_wait(filp, fl);
231 fl->fl_type = fl_type;
232 }
233out:
234 return res;
235}
236
237static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
238{
239 struct p9_getlock glock;
240 struct p9_fid *fid;
241 int res = 0;
242
243 fid = filp->private_data;
244 BUG_ON(fid == NULL);
245
246 posix_test_lock(filp, fl);
247 /*
248 * if we have a conflicting lock locally, no need to validate
249 * with server
250 */
251 if (fl->fl_type != F_UNLCK)
252 return res;
253
254 /* convert posix lock to p9 tgetlock args */
255 memset(&glock, 0, sizeof(glock));
256 glock.type = P9_LOCK_TYPE_UNLCK;
257 glock.start = fl->fl_start;
258 if (fl->fl_end == OFFSET_MAX)
259 glock.length = 0;
260 else
261 glock.length = fl->fl_end - fl->fl_start + 1;
262 glock.proc_id = fl->fl_pid;
263 glock.client_id = fid->clnt->name;
264
265 res = p9_client_getlock_dotl(fid, &glock);
266 if (res < 0)
267 return res;
268 /* map 9p lock type to os lock type */
269 switch (glock.type) {
270 case P9_LOCK_TYPE_RDLCK:
271 fl->fl_type = F_RDLCK;
272 break;
273 case P9_LOCK_TYPE_WRLCK:
274 fl->fl_type = F_WRLCK;
275 break;
276 case P9_LOCK_TYPE_UNLCK:
277 fl->fl_type = F_UNLCK;
278 break;
279 }
280 if (glock.type != P9_LOCK_TYPE_UNLCK) {
281 fl->fl_start = glock.start;
282 if (glock.length == 0)
283 fl->fl_end = OFFSET_MAX;
284 else
285 fl->fl_end = glock.start + glock.length - 1;
286 fl->fl_pid = glock.proc_id;
287 }
288 return res;
289}
290
291/**
292 * v9fs_file_lock_dotl - lock a file (or directory)
293 * @filp: file to be locked
294 * @cmd: lock command
295 * @fl: file lock structure
296 *
297 */
298
299static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
300{
301 struct inode *inode = file_inode(filp);
302 int ret = -ENOLCK;
303
304 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
305 filp, cmd, fl, filp->f_path.dentry->d_name.name);
306
307 /* No mandatory locks */
308 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
309 goto out_err;
310
311 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
312 filemap_write_and_wait(inode->i_mapping);
313 invalidate_mapping_pages(&inode->i_data, 0, -1);
314 }
315
316 if (IS_SETLK(cmd) || IS_SETLKW(cmd))
317 ret = v9fs_file_do_lock(filp, cmd, fl);
318 else if (IS_GETLK(cmd))
319 ret = v9fs_file_getlock(filp, fl);
320 else
321 ret = -EINVAL;
322out_err:
323 return ret;
324}
325
326/**
327 * v9fs_file_flock_dotl - lock a file
328 * @filp: file to be locked
329 * @cmd: lock command
330 * @fl: file lock structure
331 *
332 */
333
334static int v9fs_file_flock_dotl(struct file *filp, int cmd,
335 struct file_lock *fl)
336{
337 struct inode *inode = file_inode(filp);
338 int ret = -ENOLCK;
339
340 p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
341 filp, cmd, fl, filp->f_path.dentry->d_name.name);
342
343 /* No mandatory locks */
344 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
345 goto out_err;
346
347 if (!(fl->fl_flags & FL_FLOCK))
348 goto out_err;
349
350 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
351 filemap_write_and_wait(inode->i_mapping);
352 invalidate_mapping_pages(&inode->i_data, 0, -1);
353 }
354 /* Convert flock to posix lock */
355 fl->fl_owner = (fl_owner_t)filp;
356 fl->fl_start = 0;
357 fl->fl_end = OFFSET_MAX;
358 fl->fl_flags |= FL_POSIX;
359 fl->fl_flags ^= FL_FLOCK;
360
361 if (IS_SETLK(cmd) | IS_SETLKW(cmd))
362 ret = v9fs_file_do_lock(filp, cmd, fl);
363 else
364 ret = -EINVAL;
365out_err:
366 return ret;
367}
368
369/**
370 * v9fs_fid_readn - read from a fid
371 * @fid: fid to read
372 * @data: data buffer to read data into
373 * @udata: user data buffer to read data into
374 * @count: size of buffer
375 * @offset: offset at which to read data
376 *
377 */
378ssize_t
379v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
380 u64 offset)
381{
382 int n, total, size;
383
384 p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
385 fid->fid, (long long unsigned)offset, count);
386 n = 0;
387 total = 0;
388 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
389 do {
390 n = p9_client_read(fid, data, udata, offset, count);
391 if (n <= 0)
392 break;
393
394 if (data)
395 data += n;
396 if (udata)
397 udata += n;
398
399 offset += n;
400 count -= n;
401 total += n;
402 } while (count > 0 && n == size);
403
404 if (n < 0)
405 total = n;
406
407 return total;
408}
409
410/**
411 * v9fs_file_readn - read from a file
412 * @filp: file pointer to read
413 * @data: data buffer to read data into
414 * @udata: user data buffer to read data into
415 * @count: size of buffer
416 * @offset: offset at which to read data
417 *
418 */
419ssize_t
420v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
421 u64 offset)
422{
423 return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
424}
425
426/**
427 * v9fs_file_read - read from a file
428 * @filp: file pointer to read
429 * @udata: user data buffer to read data into
430 * @count: size of buffer
431 * @offset: offset at which to read data
432 *
433 */
434
435static ssize_t
436v9fs_file_read(struct file *filp, char __user *udata, size_t count,
437 loff_t * offset)
438{
439 int ret;
440 struct p9_fid *fid;
441 size_t size;
442
443 p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
444 fid = filp->private_data;
445
446 size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
447 if (count > size)
448 ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
449 else
450 ret = p9_client_read(fid, NULL, udata, *offset, count);
451
452 if (ret > 0)
453 *offset += ret;
454
455 return ret;
456}
457
458ssize_t
459v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
460 const char __user *data, size_t count,
461 loff_t *offset, int invalidate)
462{
463 int n;
464 loff_t i_size;
465 size_t total = 0;
466 loff_t origin = *offset;
467 unsigned long pg_start, pg_end;
468
469 p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
470 data, (int)count, (int)*offset);
471
472 do {
473 n = p9_client_write(fid, NULL, data+total, origin+total, count);
474 if (n <= 0)
475 break;
476 count -= n;
477 total += n;
478 } while (count > 0);
479
480 if (invalidate && (total > 0)) {
481 pg_start = origin >> PAGE_CACHE_SHIFT;
482 pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
483 if (inode->i_mapping && inode->i_mapping->nrpages)
484 invalidate_inode_pages2_range(inode->i_mapping,
485 pg_start, pg_end);
486 *offset += total;
487 i_size = i_size_read(inode);
488 if (*offset > i_size) {
489 inode_add_bytes(inode, *offset - i_size);
490 i_size_write(inode, *offset);
491 }
492 }
493 if (n < 0)
494 return n;
495
496 return total;
497}
498
499/**
500 * v9fs_file_write - write to a file
501 * @filp: file pointer to write
502 * @data: data buffer to write data from
503 * @count: size of buffer
504 * @offset: offset at which to write data
505 *
506 */
507static ssize_t
508v9fs_file_write(struct file *filp, const char __user * data,
509 size_t count, loff_t *offset)
510{
511 ssize_t retval = 0;
512 loff_t origin = *offset;
513
514
515 retval = generic_write_checks(filp, &origin, &count, 0);
516 if (retval)
517 goto out;
518
519 retval = -EINVAL;
520 if ((ssize_t) count < 0)
521 goto out;
522 retval = 0;
523 if (!count)
524 goto out;
525
526 retval = v9fs_file_write_internal(file_inode(filp),
527 filp->private_data,
528 data, count, &origin, 1);
529 /* update offset on successful write */
530 if (retval > 0)
531 *offset = origin;
532out:
533 return retval;
534}
535
536
537static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
538 int datasync)
539{
540 struct p9_fid *fid;
541 struct inode *inode = filp->f_mapping->host;
542 struct p9_wstat wstat;
543 int retval;
544
545 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
546 if (retval)
547 return retval;
548
549 mutex_lock(&inode->i_mutex);
550 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
551
552 fid = filp->private_data;
553 v9fs_blank_wstat(&wstat);
554
555 retval = p9_client_wstat(fid, &wstat);
556 mutex_unlock(&inode->i_mutex);
557
558 return retval;
559}
560
561int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
562 int datasync)
563{
564 struct p9_fid *fid;
565 struct inode *inode = filp->f_mapping->host;
566 int retval;
567
568 retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
569 if (retval)
570 return retval;
571
572 mutex_lock(&inode->i_mutex);
573 p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
574
575 fid = filp->private_data;
576
577 retval = p9_client_fsync(fid, datasync);
578 mutex_unlock(&inode->i_mutex);
579
580 return retval;
581}
582
583static int
584v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
585{
586 int retval;
587
588
589 retval = generic_file_mmap(filp, vma);
590 if (!retval)
591 vma->vm_ops = &v9fs_file_vm_ops;
592
593 return retval;
594}
595
596static int
597v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
598{
599 int retval;
600 struct inode *inode;
601 struct v9fs_inode *v9inode;
602 struct p9_fid *fid;
603
604 inode = file_inode(filp);
605 v9inode = V9FS_I(inode);
606 mutex_lock(&v9inode->v_mutex);
607 if (!v9inode->writeback_fid &&
608 (vma->vm_flags & VM_WRITE)) {
609 /*
610 * clone a fid and add it to writeback_fid
611 * we do it during mmap instead of
612 * page dirty time via write_begin/page_mkwrite
613 * because we want write after unlink usecase
614 * to work.
615 */
616 fid = v9fs_writeback_fid(filp->f_path.dentry);
617 if (IS_ERR(fid)) {
618 retval = PTR_ERR(fid);
619 mutex_unlock(&v9inode->v_mutex);
620 return retval;
621 }
622 v9inode->writeback_fid = (void *) fid;
623 }
624 mutex_unlock(&v9inode->v_mutex);
625
626 retval = generic_file_mmap(filp, vma);
627 if (!retval)
628 vma->vm_ops = &v9fs_mmap_file_vm_ops;
629
630 return retval;
631}
632
633static int
634v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
635{
636 struct v9fs_inode *v9inode;
637 struct page *page = vmf->page;
638 struct file *filp = vma->vm_file;
639 struct inode *inode = file_inode(filp);
640
641
642 p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
643 page, (unsigned long)filp->private_data);
644
645 /* Update file times before taking page lock */
646 file_update_time(filp);
647
648 v9inode = V9FS_I(inode);
649 /* make sure the cache has finished storing the page */
650 v9fs_fscache_wait_on_page_write(inode, page);
651 BUG_ON(!v9inode->writeback_fid);
652 lock_page(page);
653 if (page->mapping != inode->i_mapping)
654 goto out_unlock;
655 wait_for_stable_page(page);
656
657 return VM_FAULT_LOCKED;
658out_unlock:
659 unlock_page(page);
660 return VM_FAULT_NOPAGE;
661}
662
663static ssize_t
664v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
665 loff_t *offsetp)
666{
667 loff_t size, offset;
668 struct inode *inode;
669 struct address_space *mapping;
670
671 offset = *offsetp;
672 mapping = filp->f_mapping;
673 inode = mapping->host;
674 if (!count)
675 return 0;
676 size = i_size_read(inode);
677 if (offset < size)
678 filemap_write_and_wait_range(mapping, offset,
679 offset + count - 1);
680
681 return v9fs_file_read(filp, udata, count, offsetp);
682}
683
684/**
685 * v9fs_cached_file_read - read from a file
686 * @filp: file pointer to read
687 * @udata: user data buffer to read data into
688 * @count: size of buffer
689 * @offset: offset at which to read data
690 *
691 */
692static ssize_t
693v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
694 loff_t *offset)
695{
696 if (filp->f_flags & O_DIRECT)
697 return v9fs_direct_read(filp, data, count, offset);
698 return do_sync_read(filp, data, count, offset);
699}
700
701/**
702 * v9fs_mmap_file_read - read from a file
703 * @filp: file pointer to read
704 * @udata: user data buffer to read data into
705 * @count: size of buffer
706 * @offset: offset at which to read data
707 *
708 */
709static ssize_t
710v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
711 loff_t *offset)
712{
713 /* TODO: Check if there are dirty pages */
714 return v9fs_file_read(filp, data, count, offset);
715}
716
717static ssize_t
718v9fs_direct_write(struct file *filp, const char __user * data,
719 size_t count, loff_t *offsetp)
720{
721 loff_t offset;
722 ssize_t retval;
723 struct inode *inode;
724 struct address_space *mapping;
725
726 offset = *offsetp;
727 mapping = filp->f_mapping;
728 inode = mapping->host;
729 if (!count)
730 return 0;
731
732 mutex_lock(&inode->i_mutex);
733 retval = filemap_write_and_wait_range(mapping, offset,
734 offset + count - 1);
735 if (retval)
736 goto err_out;
737 /*
738 * After a write we want buffered reads to be sure to go to disk to get
739 * the new data. We invalidate clean cached page from the region we're
740 * about to write. We do this *before* the write so that if we fail
741 * here we fall back to buffered write
742 */
743 if (mapping->nrpages) {
744 pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
745 pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
746
747 retval = invalidate_inode_pages2_range(mapping,
748 pg_start, pg_end);
749 /*
750 * If a page can not be invalidated, fall back
751 * to buffered write.
752 */
753 if (retval) {
754 if (retval == -EBUSY)
755 goto buff_write;
756 goto err_out;
757 }
758 }
759 retval = v9fs_file_write(filp, data, count, offsetp);
760err_out:
761 mutex_unlock(&inode->i_mutex);
762 return retval;
763
764buff_write:
765 mutex_unlock(&inode->i_mutex);
766 return do_sync_write(filp, data, count, offsetp);
767}
768
769/**
770 * v9fs_cached_file_write - write to a file
771 * @filp: file pointer to write
772 * @data: data buffer to write data from
773 * @count: size of buffer
774 * @offset: offset at which to write data
775 *
776 */
777static ssize_t
778v9fs_cached_file_write(struct file *filp, const char __user * data,
779 size_t count, loff_t *offset)
780{
781
782 if (filp->f_flags & O_DIRECT)
783 return v9fs_direct_write(filp, data, count, offset);
784 return do_sync_write(filp, data, count, offset);
785}
786
787
788/**
789 * v9fs_mmap_file_write - write to a file
790 * @filp: file pointer to write
791 * @data: data buffer to write data from
792 * @count: size of buffer
793 * @offset: offset at which to write data
794 *
795 */
796static ssize_t
797v9fs_mmap_file_write(struct file *filp, const char __user *data,
798 size_t count, loff_t *offset)
799{
800 /*
801 * TODO: invalidate mmaps on filp's inode between
802 * offset and offset+count
803 */
804 return v9fs_file_write(filp, data, count, offset);
805}
806
807static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
808{
809 struct inode *inode;
810
811 struct writeback_control wbc = {
812 .nr_to_write = LONG_MAX,
813 .sync_mode = WB_SYNC_ALL,
814 .range_start = vma->vm_pgoff * PAGE_SIZE,
815 /* absolute end, byte at end included */
816 .range_end = vma->vm_pgoff * PAGE_SIZE +
817 (vma->vm_end - vma->vm_start - 1),
818 };
819
820
821 p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
822
823 inode = file_inode(vma->vm_file);
824
825 if (!mapping_cap_writeback_dirty(inode->i_mapping))
826 wbc.nr_to_write = 0;
827
828 might_sleep();
829 sync_inode(inode, &wbc);
830}
831
832
833static const struct vm_operations_struct v9fs_file_vm_ops = {
834 .fault = filemap_fault,
835 .map_pages = filemap_map_pages,
836 .page_mkwrite = v9fs_vm_page_mkwrite,
837 .remap_pages = generic_file_remap_pages,
838};
839
840static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
841 .close = v9fs_mmap_vm_close,
842 .fault = filemap_fault,
843 .map_pages = filemap_map_pages,
844 .page_mkwrite = v9fs_vm_page_mkwrite,
845 .remap_pages = generic_file_remap_pages,
846};
847
848
849const struct file_operations v9fs_cached_file_operations = {
850 .llseek = generic_file_llseek,
851 .read = v9fs_cached_file_read,
852 .write = v9fs_cached_file_write,
853 .aio_read = generic_file_aio_read,
854 .aio_write = generic_file_aio_write,
855 .open = v9fs_file_open,
856 .release = v9fs_dir_release,
857 .lock = v9fs_file_lock,
858 .mmap = v9fs_file_mmap,
859 .fsync = v9fs_file_fsync,
860};
861
862const struct file_operations v9fs_cached_file_operations_dotl = {
863 .llseek = generic_file_llseek,
864 .read = v9fs_cached_file_read,
865 .write = v9fs_cached_file_write,
866 .aio_read = generic_file_aio_read,
867 .aio_write = generic_file_aio_write,
868 .open = v9fs_file_open,
869 .release = v9fs_dir_release,
870 .lock = v9fs_file_lock_dotl,
871 .flock = v9fs_file_flock_dotl,
872 .mmap = v9fs_file_mmap,
873 .fsync = v9fs_file_fsync_dotl,
874};
875
876const struct file_operations v9fs_file_operations = {
877 .llseek = generic_file_llseek,
878 .read = v9fs_file_read,
879 .write = v9fs_file_write,
880 .open = v9fs_file_open,
881 .release = v9fs_dir_release,
882 .lock = v9fs_file_lock,
883 .mmap = generic_file_readonly_mmap,
884 .fsync = v9fs_file_fsync,
885};
886
887const struct file_operations v9fs_file_operations_dotl = {
888 .llseek = generic_file_llseek,
889 .read = v9fs_file_read,
890 .write = v9fs_file_write,
891 .open = v9fs_file_open,
892 .release = v9fs_dir_release,
893 .lock = v9fs_file_lock_dotl,
894 .flock = v9fs_file_flock_dotl,
895 .mmap = generic_file_readonly_mmap,
896 .fsync = v9fs_file_fsync_dotl,
897};
898
899const struct file_operations v9fs_mmap_file_operations = {
900 .llseek = generic_file_llseek,
901 .read = v9fs_mmap_file_read,
902 .write = v9fs_mmap_file_write,
903 .open = v9fs_file_open,
904 .release = v9fs_dir_release,
905 .lock = v9fs_file_lock,
906 .mmap = v9fs_mmap_file_mmap,
907 .fsync = v9fs_file_fsync,
908};
909
910const struct file_operations v9fs_mmap_file_operations_dotl = {
911 .llseek = generic_file_llseek,
912 .read = v9fs_mmap_file_read,
913 .write = v9fs_mmap_file_write,
914 .open = v9fs_file_open,
915 .release = v9fs_dir_release,
916 .lock = v9fs_file_lock_dotl,
917 .flock = v9fs_file_flock_dotl,
918 .mmap = v9fs_mmap_file_mmap,
919 .fsync = v9fs_file_fsync_dotl,
920};