#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
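/*
 * Worked example (illustrative addresses, not from the original
 * source): with PAGE_SIZE = 4096, iov_offset = 0 and three iovecs
 *   { .iov_base = 0x1200, .iov_len = 0x2e00 }  tail 0x4000, aligned
 *   { .iov_base = 0x8000, .iov_len = 0x1000 }  base and tail aligned
 *   { .iov_base = 0x9100, .iov_len = 0x0f00 }  base not aligned
 * dio_get_pagev_size() returns 0x2e00 + 0x1000 = 0x3e00: the first
 * two iovecs can be combined into one page vector because every
 * boundary between them falls on a page edge, while the third cannot.
 */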

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages) {
		pages = vmalloc(sizeof(*pages) * npages);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
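/*
 * Worked example for the tuple above (hypothetical numbers): a 10000
 * byte request starting at user address 0x1200 gives page_align =
 * 0x200, and calc_pages_for(0x200, 10000) = 3 pages, since the data
 * spans bytes 0x200..0x2910 of the mapped page range.
 */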

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* The first open request creates the fscache cookie.  We
		 * want to keep this cookie around for the lifetime of the
		 * inode so as not to have to worry about fscache
		 * register / revoke / operation races.
		 *
		 * Also, if we know the operation is going to invalidate
		 * data (not read-only) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};
411
412/*
413 * Read a range of bytes striped over one or more objects. Iterate over
414 * objects we stripe over. (That's not atomic, but good enough for now.)
415 *
416 * If we get a short result from the OSD, check against i_size; we need to
417 * only return a short read to the caller if we hit EOF.
418 */
419static int striped_read(struct inode *inode,
420 u64 off, u64 len,
421 struct page **pages, int num_pages,
422 int *checkeof)
423{
424 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
425 struct ceph_inode_info *ci = ceph_inode(inode);
426 u64 pos, this_len, left;
427 loff_t i_size;
428 int page_align, pages_left;
429 int read, ret;
430 struct page **page_pos;
431 bool hit_stripe, was_short;
432
433 /*
434 * we may need to do multiple reads. not atomic, unfortunately.
435 */
436 pos = off;
437 left = len;
438 page_pos = pages;
439 pages_left = num_pages;
440 read = 0;
441
442more:
443 page_align = pos & ~PAGE_MASK;
444 this_len = left;
445 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
446 &ci->i_layout, pos, &this_len,
447 ci->i_truncate_seq,
448 ci->i_truncate_size,
449 page_pos, pages_left, page_align);
450 if (ret == -ENOENT)
451 ret = 0;
452 hit_stripe = this_len < left;
453 was_short = ret >= 0 && ret < this_len;
454 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
455 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
456
457 i_size = i_size_read(inode);
458 if (ret >= 0) {
459 int didpages;
460 if (was_short && (pos + ret < i_size)) {
461 int zlen = min(this_len - ret, i_size - pos - ret);
462 int zoff = (off & ~PAGE_MASK) + read + ret;
463 dout(" zero gap %llu to %llu\n",
464 pos + ret, pos + ret + zlen);
465 ceph_zero_page_vector_range(zoff, zlen, pages);
466 ret += zlen;
467 }
468
469 didpages = (page_align + ret) >> PAGE_SHIFT;
470 pos += ret;
471 read = pos - off;
472 left -= ret;
473 page_pos += didpages;
474 pages_left -= didpages;
475
476 /* hit stripe and need continue*/
477 if (left && hit_stripe && pos < i_size)
478 goto more;
479 }
480
481 if (read > 0) {
482 ret = read;
483 /* did we bounce off eof? */
484 if (pos + left > i_size)
485 *checkeof = CHECK_EOF;
486 }
487
488 dout("striped_read returns %d\n", ret);
489 return ret;
490}
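/*
 * Worked example (illustrative layout, not from the original source):
 * with 4 MB objects and default striping, a 6 MB read starting at
 * offset 2 MB is clipped by ceph_osdc_readpages() to the 2 MB left in
 * the first object (this_len < left, so hit_stripe is set), and a
 * second pass reads the remaining 4 MB from the next object.  A short
 * return inside i_size is zero-filled (a hole), while a short return
 * at the end of the file makes the caller recheck EOF via CHECK_EOF.
 */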

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = iov_iter_count(i);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	num_pages = calc_pages_for(off, len);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	ret = striped_read(inode, off, len, pages,
			   num_pages, checkeof);
	if (ret > 0) {
		int l, k = 0;
		size_t left = ret;

		while (left) {
			size_t page_off = off & ~PAGE_MASK;
			size_t copy = min_t(size_t, left,
					    PAGE_SIZE - page_off);
			l = copy_page_to_iter(pages[k++], page_off, copy, i);
			off += l;
			left -= l;
			if (l < copy)
				break;
		}
	}
	ceph_release_page_vector(pages, num_pages);

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	int write;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
					        CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req,
				  struct ceph_msg *msg)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If the read is satisfied by a single OSD
			 * request, it can extend past EOF.  Otherwise
			 * the read is within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, false);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
		       CEPH_OSD_FLAG_ONDISK |
		       CEPH_OSD_FLAG_WRITE;
	req->r_base_oloc = orig_req->r_base_oloc;
	req->r_base_oid = orig_req->r_base_oid;

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
				snapc, CEPH_NOSNAP, &aio_req->mtime);

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req, NULL);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight: it has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}


static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_fs_time(inode->i_sb);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		ret = invalidate_inode_pages2_range(inode->i_mapping,
						    pos >> PAGE_SHIFT,
						    (pos + count) >> PAGE_SHIFT);
		if (ret < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /* include a 'startsync' command */
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO only when the
		 * IO is within i_size or can be satisfied by a single
		 * OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.
			 * this may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos + len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, false);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		while (!list_empty(&aio_req->osd_reqs)) {
			req = list_first_entry(&aio_req->osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req, NULL);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = current_fs_time(inode->i_sb);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_read_iter(iocb, to);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			/* page is only allocated for READ_INLINE */
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			inode_lock(inode);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		loff_t old_size = i_size_read(inode);
		/*
		 * No need to acquire the i_truncate_mutex.  Because
		 * the MDS revokes Fwb caps before sending a truncate
		 * message to us.  We can't get Fwb cap while there
		 * is a pending vmtruncate.  So write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (i_size_read(inode) > old_size)
			ceph_fscache_update_objectsize(inode);
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	int ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			/* return the error through offset, like the
			 * getattr failure path above */
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return offset;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
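/*
 * Example of the split above (hypothetical values): zeroing offset
 * 0x1800, length 0x3000 with 4 KB pages first zeroes 0x800 bytes in
 * the partial head page (up to 0x2000), then truncates the two whole
 * pages 0x2000..0x3fff, and finally zeroes the remaining 0x800 bytes
 * in the tail page starting at 0x4000.
 */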

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
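/*
 * Worked example for the period arithmetic above (illustrative layout,
 * not from the original source): with object_size = 4 MB and
 * stripe_count = 2, the period (object_set_size) is 8 MB.  Punching a
 * hole at offset 6 MB, length 12 MB first zeroes the 2 MB of partial
 * objects up to the 8 MB period boundary, then drops the objects
 * making up one whole 8 MB object set via the NULL-length calls, and
 * finally zeroes the remaining 2 MB at the head of the next period.
 */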

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/module.h>
4#include <linux/sched.h>
5#include <linux/slab.h>
6#include <linux/file.h>
7#include <linux/mount.h>
8#include <linux/namei.h>
9#include <linux/writeback.h>
10#include <linux/falloc.h>
11
12#include "super.h"
13#include "mds_client.h"
14#include "cache.h"
15
16/*
17 * Ceph file operations
18 *
19 * Implement basic open/close functionality, and implement
20 * read/write.
21 *
22 * We implement three modes of file I/O:
23 * - buffered uses the generic_file_aio_{read,write} helpers
24 *
25 * - synchronous is used when there is multi-client read/write
26 * sharing, avoids the page cache, and synchronously waits for an
27 * ack from the OSD.
28 *
29 * - direct io takes the variant of the sync path that references
30 * user pages directly.
31 *
32 * fsync() flushes and waits on dirty pages, but just queues metadata
33 * for writeback: since the MDS can recover size and mtime there is no
34 * need to wait for MDS acknowledgement.
35 */
36
37/*
38 * Calculate the length sum of direct io vectors that can
39 * be combined into one page vector.
40 */
41static size_t dio_get_pagev_size(const struct iov_iter *it)
42{
43 const struct iovec *iov = it->iov;
44 const struct iovec *iovend = iov + it->nr_segs;
45 size_t size;
46
47 size = iov->iov_len - it->iov_offset;
48 /*
49 * An iov can be page vectored when both the current tail
50 * and the next base are page aligned.
51 */
52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
54 size += iov->iov_len;
55 }
56 dout("dio_get_pagevlen len = %zu\n", size);
57 return size;
58}
59
60/*
61 * Allocate a page vector based on (@it, @nbytes).
62 * The return value is the tuple describing a page vector,
63 * that is (@pages, @page_align, @num_pages).
64 */
65static struct page **
66dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
67 size_t *page_align, int *num_pages)
68{
69 struct iov_iter tmp_it = *it;
70 size_t align;
71 struct page **pages;
72 int ret = 0, idx, npages;
73
74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
75 (PAGE_SIZE - 1);
76 npages = calc_pages_for(align, nbytes);
77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
78 if (!pages) {
79 pages = vmalloc(sizeof(*pages) * npages);
80 if (!pages)
81 return ERR_PTR(-ENOMEM);
82 }
83
84 for (idx = 0; idx < npages; ) {
85 size_t start;
86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
87 npages - idx, &start);
88 if (ret < 0)
89 goto fail;
90
91 iov_iter_advance(&tmp_it, ret);
92 nbytes -= ret;
93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
94 }
95
96 BUG_ON(nbytes != 0);
97 *num_pages = npages;
98 *page_align = align;
99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
100 return pages;
101fail:
102 ceph_put_page_vector(pages, idx, false);
103 return ERR_PTR(ret);
104}
105
106/*
107 * Prepare an open request. Preallocate ceph_cap to avoid an
108 * inopportune ENOMEM later.
109 */
110static struct ceph_mds_request *
111prepare_open_request(struct super_block *sb, int flags, int create_mode)
112{
113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
114 struct ceph_mds_client *mdsc = fsc->mdsc;
115 struct ceph_mds_request *req;
116 int want_auth = USE_ANY_MDS;
117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
118
119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
120 want_auth = USE_AUTH_MDS;
121
122 req = ceph_mdsc_create_request(mdsc, op, want_auth);
123 if (IS_ERR(req))
124 goto out;
125 req->r_fmode = ceph_flags_to_mode(flags);
126 req->r_args.open.flags = cpu_to_le32(flags);
127 req->r_args.open.mode = cpu_to_le32(create_mode);
128out:
129 return req;
130}
131
132/*
133 * initialize private struct file data.
134 * if we fail, clean up by dropping fmode reference on the ceph_inode
135 */
136static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
137{
138 struct ceph_file_info *cf;
139 int ret = 0;
140
141 switch (inode->i_mode & S_IFMT) {
142 case S_IFREG:
143 ceph_fscache_register_inode_cookie(inode);
144 ceph_fscache_file_set_cookie(inode, file);
145 case S_IFDIR:
146 dout("init_file %p %p 0%o (regular)\n", inode, file,
147 inode->i_mode);
148 cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
149 if (cf == NULL) {
150 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
151 return -ENOMEM;
152 }
153 cf->fmode = fmode;
154 cf->next_offset = 2;
155 cf->readdir_cache_idx = -1;
156 file->private_data = cf;
157 BUG_ON(inode->i_fop->release != ceph_release);
158 break;
159
160 case S_IFLNK:
161 dout("init_file %p %p 0%o (symlink)\n", inode, file,
162 inode->i_mode);
163 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
164 break;
165
166 default:
167 dout("init_file %p %p 0%o (special)\n", inode, file,
168 inode->i_mode);
169 /*
170 * we need to drop the open ref now, since we don't
171 * have .release set to ceph_release.
172 */
173 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
174 BUG_ON(inode->i_fop->release == ceph_release);
175
176 /* call the proper open fop */
177 ret = inode->i_fop->open(inode, file);
178 }
179 return ret;
180}
181
182/*
183 * try renew caps after session gets killed.
184 */
185int ceph_renew_caps(struct inode *inode)
186{
187 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
188 struct ceph_inode_info *ci = ceph_inode(inode);
189 struct ceph_mds_request *req;
190 int err, flags, wanted;
191
192 spin_lock(&ci->i_ceph_lock);
193 wanted = __ceph_caps_file_wanted(ci);
194 if (__ceph_is_any_real_caps(ci) &&
195 (!(wanted & CEPH_CAP_ANY_WR) == 0 || ci->i_auth_cap)) {
196 int issued = __ceph_caps_issued(ci, NULL);
197 spin_unlock(&ci->i_ceph_lock);
198 dout("renew caps %p want %s issued %s updating mds_wanted\n",
199 inode, ceph_cap_string(wanted), ceph_cap_string(issued));
200 ceph_check_caps(ci, 0, NULL);
201 return 0;
202 }
203 spin_unlock(&ci->i_ceph_lock);
204
205 flags = 0;
206 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
207 flags = O_RDWR;
208 else if (wanted & CEPH_CAP_FILE_RD)
209 flags = O_RDONLY;
210 else if (wanted & CEPH_CAP_FILE_WR)
211 flags = O_WRONLY;
212#ifdef O_LAZY
213 if (wanted & CEPH_CAP_FILE_LAZYIO)
214 flags |= O_LAZY;
215#endif
216
217 req = prepare_open_request(inode->i_sb, flags, 0);
218 if (IS_ERR(req)) {
219 err = PTR_ERR(req);
220 goto out;
221 }
222
223 req->r_inode = inode;
224 ihold(inode);
225 req->r_num_caps = 1;
226 req->r_fmode = -1;
227
228 err = ceph_mdsc_do_request(mdsc, NULL, req);
229 ceph_mdsc_put_request(req);
230out:
231 dout("renew caps %p open result=%d\n", inode, err);
232 return err < 0 ? err : 0;
233}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;	/* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
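
/*
 * Editor's note: these values drive the retry loop in ceph_read_iter().
 * Assumed semantics: HAVE_RETRIED marks a read that already went around
 * once after a short read; CHECK_EOF requests an i_size recheck because
 * the OSD returned less than asked for; READ_INLINE redirects the read
 * to inline data fetched from the MDS.
 */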
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;
		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
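
/*
 * Worked example (editor's note, using the default layout as an
 * assumption: 4 MB objects, stripe_unit == object_size, stripe_count 1).
 * A call striped_read(inode, pos = 3670016 (3.5 MB), len = 1 MB, ...)
 * proceeds in two passes:
 *
 *   1. ceph_osdc_readpages() truncates this_len to 524288 bytes, the
 *      remainder of the first object, so hit_stripe is true; read
 *      becomes 0.5 MB and pos advances to the 4 MB boundary.
 *   2. The "goto more" re-issues the read for the remaining 0.5 MB
 *      from the second object.
 *
 * A short OSD return inside i_size is zero-filled rather than exposed
 * to the caller; only a shortfall at EOF flags *checkeof.
 */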

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len - 1);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return ret < 0 ? ret : -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
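
/*
 * Editor's note on the two paths above: for a pipe-backed iterator the
 * pages come from iov_iter_get_pages_alloc() and the OSD reads land in
 * them directly, so only the iterator needs advancing; for a plain user
 * buffer we bounce through a private page vector and copy_page_to_iter().
 * A hypothetical caller sees the same contract either way: the return
 * value is bytes actually copied and ki_pos advances by exactly that
 * amount.
 */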

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	int write;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
					        CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
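
/*
 * Editor's note: ceph_aio_complete() runs once per ceph_aio_request,
 * when the last sub-request drops pending_reqs to zero.  A sketch of
 * the (assumed) accounting in ceph_direct_read_write():
 *
 *	atomic_inc(&aio_req->pending_reqs);	(once per OSD request built)
 *	...
 *	ceph_aio_complete(inode, aio_req);	(once per OSD request done)
 *
 * so the iocb is completed exactly once, no matter how many OSD
 * requests the iovec was split into or in which order they finish.
 */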

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If the read is satisfied by a single OSD
			 * request, it may extend past EOF.  Otherwise
			 * the read is within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
		       CEPH_OSD_FLAG_ONDISK |
		       CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);

		complete_all(&req->r_completion);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
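
/*
 * Editor's sketch of the resulting lifecycle (assumed, matching the
 * callback above): each unsafe write pins a CEPH_CAP_FILE_WR reference
 * and sits on ci->i_unsafe_writes until the ONDISK reply arrives:
 *
 *	unsafe=true:  get Fw ref, add to i_unsafe_writes, wake waiters
 *	unsafe=false: del from i_unsafe_writes, drop Fw ref
 *
 * Holding the Fw reference keeps the MDS from revoking the write cap
 * while the data is only acked, not yet committed to disk.
 */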

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
void ceph_sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	if (!S_ISREG(inode->i_mode))
		return;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */

	req = list_last_entry(head, struct ceph_osd_request,
			      r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_done_completion);
		ceph_osdc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_first_entry(head, struct ceph_osd_request,
				       r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}
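
/*
 * Worked example (editor's note, tids are hypothetical): suppose
 * i_unsafe_writes holds requests with tids 5, 7 and 9 on entry.
 * last_tid becomes 9 and we wait on tid 9 first.  If tid 11 is added
 * concurrently, each later pass re-reads the *first* entry, waiting on
 * 5, then 7, and stops as soon as the head's tid is >= 9, so the new
 * tid 11 is never waited on and the function cannot livelock behind
 * fresh writes.
 */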

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos,
					   pos + count - 1);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /*include a 'startsync' command*/
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow async IO only when
		 * the IO is within i_size or can be satisfied by a
		 * single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos + len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, !write);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
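
/*
 * Editor's note: for the async case all OSD requests are first built
 * and parked on aio_req->osd_reqs, and only submitted in the final
 * loop above.  That ordering guarantees pending_reqs has reached its
 * final value before any completion can run, which is what makes the
 * dec-and-test in ceph_aio_complete() safe.  -EIOCBQUEUED tells the
 * VFS that ki_complete() will deliver the result later.
 */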

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos,
					   pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		current->journal_info = filp;
		ret = generic_file_read_iter(iocb, to);
		current->journal_info = NULL;
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			inode_lock(inode);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we can't get the Fwb cap while there is
		 * a pending vmtruncate, so write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;

		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
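
/*
 * Worked example (editor's note, assuming PAGE_SIZE == 4096): zeroing
 * offset = 1000, length = 10000 splits into three phases:
 *
 *   1. head:   zero bytes 1000..4095 in place (3096 bytes);
 *   2. middle: 6904 bytes remain, so round_down() gives one whole page,
 *              4096..8191, which is simply truncated from the cache;
 *   3. tail:   zero the leftover 2808 bytes at 8192..10999 in place.
 *
 * Partial pages are zeroed rather than dropped so that data sharing a
 * page with the punched range survives.
 */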

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
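
/*
 * Worked example (editor's note, hypothetical layout: object_size 4 MB,
 * stripe_count 2, so object_set_size == 8 MB).  For offset = 6 MB,
 * length = 20 MB:
 *
 *   1. nearly rounds 6 MB up to the 8 MB period boundary; the first
 *      loop zeroes the 2 MB of partial objects up to that boundary;
 *   2. the middle loop handles two full 8 MB periods by deleting or
 *      truncating each object in the set (the length == NULL path);
 *   3. the last loop zeroes the trailing 2 MB partially.
 */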

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};