#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
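/*
 * (In this file: buffered I/O enters through ceph_aio_read/ceph_aio_write
 * and the generic helpers; the synchronous and direct modes both go
 * through ceph_sync_read/ceph_sync_write below.)
 */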


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
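		/* readdir offsets 0 and 1 are reserved for "." and ".." */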
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	if (flags & (O_CREAT|O_TRUNC))
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct file *file;
	struct ceph_mds_request *req;
	struct dentry *ret;
	int err;
	int flags = nd->intent.open.flags;

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out;
	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (err)
		goto out;
	file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open);
	if (IS_ERR(file))
		err = PTR_ERR(file);
out:
	ret = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", ret);
	return ret;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
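	/*
	 * page_align is how far into the current page of the destination
	 * vector this chunk starts: for O_DIRECT that is governed by the
	 * user buffer's own alignment, otherwise by the file offset.
	 */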
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_align + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;

		/* zero trailing bytes (inside i_size) */
		if (left > 0 && pos < inode->i_size) {
			if (pos + left > inode->i_size)
				left = inode->i_size - pos;

			dout("zero tail %d\n", left);
			ceph_zero_page_vector_range(page_align + read, left,
						    pages);
			read += left;
		}
	}

	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages, ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		num_pages = calc_pages_for((unsigned long)data, len);
		pages = ceph_get_direct_page_vector(data, num_pages, true);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT,
			   (unsigned long)data & ~PAGE_MASK);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, true);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int page_align, io_align;
	unsigned long buf_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
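	/*
	 * We always ask for an ONDISK commit.  When we can also take an
	 * early ACK (no O_SYNC/O_DIRECT), we return on the ack and let the
	 * safe callback registered below handle the on-disk commit.
	 */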
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	io_align = pos & ~PAGE_MASK;
	buf_align = (unsigned long)data & ~PAGE_MASK;
	len = left;
	if (file->f_flags & O_DIRECT) {
		/* write from beginning of first page, regardless of
		   io alignment */
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
		num_pages = calc_pages_for((unsigned long)data, len);
	} else {
		page_align = pos & ~PAGE_MASK;
		num_pages = calc_pages_for(pos, len);
	}
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2, page_align);
	if (!req)
		return -ENOMEM;

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_item,
				      &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}

		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret < 0 && req->r_safe_callback) {
			spin_lock(&ci->i_unsafe_lock);
			list_del_init(&req->r_unsafe_item);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
	}

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, false);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

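	/*
	 * The sync read may have stopped at what looked like EOF; re-check
	 * the real size with a getattr, and if i_size says there is more
	 * (we hit a hole or raced with a writer), keep reading.
	 */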
	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n",
			     *ppos, inode->i_size);
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out_put;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		/*
		 * buffered write; drop Fw early to avoid slow
		 * revocation if we get stuck on balance_dirty_pages
		 */
		int dirty;

		spin_lock(&inode->i_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
		ceph_put_cap_refs(ci, got);

		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}

		if (dirty)
			__mark_inode_dirty(inode, dirty);
		goto out;
	}

	if (ret >= 0) {
		int dirty;
		spin_lock(&inode->i_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

out_put:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

out:
	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);

	/* only fetch the size when the seek actually depends on it */
	if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};

// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

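/* translate the flags whose CEPH_* wire names match, clearing each one */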
#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

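		/*
		 * Chop the pinned pages into bio_vecs; only the first one
		 * can start at a non-zero offset.
		 */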
		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
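		/* fall through -- regular files share ceph_file_info setup */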
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
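	/*
	 * A negative r_fmode keeps the reply handler from taking fmode
	 * references; this open only refreshes the caps the MDS thinks
	 * we want.
	 */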
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
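	/*
	 * The reply may have taken an fmode ref on the target inode;
	 * ceph_open (via finish_open) takes its own, so drop the extra.
	 */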
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

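/* values returned through the retry_op argument of the sync read paths */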
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit stripe and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

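	/*
	 * For a pipe destination (splice) we cannot copy page-by-page into
	 * the iterator; have it allocate pages and read into them directly.
	 */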
	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

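/*
 * State for an O_DIRECT aio split into multiple OSD requests; the iocb
 * is completed once pending_reqs drops to zero.
 */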
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF.  Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

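	/*
	 * The write came back with EOLDSNAPC, so our snap context was
	 * stale; pick up the most recent one and resend the same extent op.
	 */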
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;
	req->r_abort_on_full = true;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

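		/* not doing aio: submit this chunk synchronously and wait */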
1036 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1037 if (!ret)
1038 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1039
1040 size = i_size_read(inode);
1041 if (!write) {
1042 if (ret == -ENOENT)
1043 ret = 0;
1044 if (ret >= 0 && ret < len && pos + ret < size) {
1045 struct iov_iter i;
1046 int zlen = min_t(size_t, len - ret,
1047 size - pos - ret);
1048
1049 iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
1050 len);
1051 iov_iter_advance(&i, ret);
1052 iov_iter_zero(zlen, &i);
1053 ret += zlen;
1054 }
1055 if (ret >= 0)
1056 len = ret;
1057 }
1058
1059 put_bvecs(bvecs, num_pages, should_dirty);
1060 ceph_osdc_put_request(req);
1061 if (ret < 0)
1062 break;
1063
1064 pos += len;
1065 if (!write && pos >= size)
1066 break;
1067
1068 if (write && pos > size) {
1069 if (ceph_inode_set_size(inode, pos))
1070 ceph_check_caps(ceph_inode(inode),
1071 CHECK_CAPS_AUTHONLY,
1072 NULL);
1073 }
1074 }
1075
1076 if (aio_req) {
1077 LIST_HEAD(osd_reqs);
1078
1079 if (aio_req->num_reqs == 0) {
1080 kfree(aio_req);
1081 return ret;
1082 }
1083
1084 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1085 CEPH_CAP_FILE_RD);
1086
1087 list_splice(&aio_req->osd_reqs, &osd_reqs);
1088 while (!list_empty(&osd_reqs)) {
1089 req = list_first_entry(&osd_reqs,
1090 struct ceph_osd_request,
1091 r_unsafe_item);
1092 list_del_init(&req->r_unsafe_item);
1093 if (ret >= 0)
1094 ret = ceph_osdc_start_request(req->r_osdc,
1095 req, false);
1096 if (ret < 0) {
1097 req->r_result = ret;
1098 ceph_aio_complete_req(req);
1099 }
1100 }
1101 return -EIOCBQUEUED;
1102 }
1103
1104 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1105 ret = pos - iocb->ki_pos;
1106 iocb->ki_pos = pos;
1107 }
1108 return ret;
1109}
1110
1111/*
1112 * Synchronous write, straight from __user pointer or user pages.
1113 *
1114 * If write spans object boundary, just do multiple writes. (For a
1115 * correct atomic write, we should e.g. take write locks on all
1116 * objects, rollback on failure, etc.)
1117 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

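	/*
	 * flush dirty pagecache pages covering the range, then drop them
	 * so later buffered reads go back to the OSDs and see this write
	 */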
	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

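	/*
	 * one OSD request per iteration; ceph_osdc_new_request() may trim
	 * len down to an object boundary, so it can take several passes
	 * to drain the iterator
	 */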
	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

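		/* copy the user data into the bounce pages, page by page */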
		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
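		/*
		 * a failed sync write sets CEPH_I_ERROR_WRITE, which keeps
		 * later writes on the sync path until one succeeds
		 */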
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
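	/*
	 * retry_op records why the read above came up short: READ_INLINE
	 * means the data is inlined in the MDS and must be fetched with a
	 * getattr; CHECK_EOF means a short read that may have hit a hole
	 */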
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
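		/*
		 * copy the inline data the MDS handed back out of the
		 * temporary page, zero-filling any gap up to i_size
		 */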
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to the MDS mid-write.
 *
 * If we are synchronous and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write... _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

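	/*
	 * go down the sync/direct path if we lack buffered-write caps,
	 * the caller asked for O_DIRECT or sync semantics, or an earlier
	 * sync write failed (CEPH_I_ERROR_WRITE)
	 */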
	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

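		/*
		 * pick the snap context this write applies to: the most
		 * recent pending cap snap if there is one, otherwise the
		 * head snap context
		 */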
		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex: the MDS revokes
		 * Fwb caps before sending a truncate message to us, and we
		 * cannot hold the Fwb cap while a vmtruncate is pending, so
		 * write and vmtruncate cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

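	/*
	 * if the cluster is approaching full, flush the data out now
	 * rather than leaving it dirty in the pagecache
	 */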
	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
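	/*
	 * we don't track holes, so treat the entire file as data:
	 * SEEK_DATA answers with the requested offset, SEEK_HOLE with
	 * i_size
	 */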
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

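/*
 * Zero a pagecache range in three steps: a partial leading page, whole
 * pages (simply dropped from the cache), and a partial trailing page.
 */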
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

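	/*
	 * a NULL length means "the whole object": the object at file
	 * offset 0 is truncated down to zero bytes, any other object is
	 * deleted outright
	 */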
	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	/* widen before multiplying to avoid 32-bit overflow */
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

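	/*
	 * zero in three phases: the tail of the current period, whole
	 * periods (one request per stripe object), then the leftover
	 * partial period
	 */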
	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
	    ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
		ret = -EDQUOT;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

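	/*
	 * punch hole: zero the pagecache first, then the backing objects;
	 * plain preallocation only has to extend i_size, since RADOS
	 * objects are created lazily on write
	 */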
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if ((endoff > size) &&
		    ceph_quota_is_max_bytes_approaching(inode, endoff))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};
