#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 * - buffered uses the generic_file_aio_{read,write} helpers
 *
 * - synchronous is used when there is multi-client read/write
 *   sharing, avoids the page cache, and synchronously waits for an
 *   ack from the OSD.
 *
 * - direct io takes the variant of the sync path that references
 *   user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
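
/*
 * In this file the three modes map onto concrete entry points:
 * buffered I/O goes through generic_file_read_iter() and
 * generic_perform_write() from ceph_read_iter()/ceph_write_iter(),
 * the synchronous path is ceph_sync_read()/ceph_sync_write(), and
 * O_DIRECT is handled by ceph_direct_read_write().
 */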

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
        const struct iovec *iov = it->iov;
        const struct iovec *iovend = iov + it->nr_segs;
        size_t size;

        size = iov->iov_len - it->iov_offset;
        /*
         * An iov can be page vectored when both the current tail
         * and the next base are page aligned.
         */
        while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
               (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
                size += iov->iov_len;
        }
        dout("dio_get_pagev_size len = %zu\n", size);
        return size;
}
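
/*
 * Example (illustrative values only): for the iovec pair
 *      { .iov_base = buf  (page aligned), .iov_len = PAGE_SIZE }
 *      { .iov_base = buf2 (page aligned), .iov_len = 512 }
 * the first segment ends on a page boundary and the second starts on
 * one, so the two coalesce and dio_get_pagev_size() returns
 * PAGE_SIZE + 512.  An unaligned second base would stop the scan at
 * PAGE_SIZE.
 */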

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
                    size_t *page_align, int *num_pages)
{
        struct iov_iter tmp_it = *it;
        size_t align;
        struct page **pages;
        int ret = 0, idx, npages;

        align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
                (PAGE_SIZE - 1);
        npages = calc_pages_for(align, nbytes);
        pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
        if (!pages) {
                pages = vmalloc(sizeof(*pages) * npages);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        for (idx = 0; idx < npages; ) {
                size_t start;
                ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
                                         npages - idx, &start);
                if (ret < 0)
                        goto fail;

                iov_iter_advance(&tmp_it, ret);
                nbytes -= ret;
                idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        BUG_ON(nbytes != 0);
        *num_pages = npages;
        *page_align = align;
        dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
        return pages;
fail:
        ceph_put_page_vector(pages, idx, false);
        return ERR_PTR(ret);
}
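
/*
 * Note: the kmalloc()-then-vmalloc() fallback above predates
 * kvmalloc_array(); on kernels that provide it, the two calls
 * collapse into one (the bvec-based direct I/O helpers later in this
 * file do exactly that).
 */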

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
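
/*
 * Both ceph_open() and ceph_atomic_open() below build their MDS
 * request through this helper; anything that can modify the file
 * (O_WRONLY/O_RDWR/O_CREAT/O_TRUNC) is routed to the auth MDS.
 */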

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                /* First file open request creates the cookie, we want to keep
                 * this cookie around for the lifetime of the inode so as not
                 * to have to worry about fscache register / revoke / operation
                 * races.
                 *
                 * Also, if we know the operation is going to invalidate data
                 * (non readonly) just nuke the cache right away.
                 */
                ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
                if ((fmode & CEPH_FILE_MODE_WR))
                        ceph_fscache_invalidate(inode);
                /* fall through */
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
                cf->next_offset = 2;
                cf->readdir_cache_idx = -1;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode,
                     int *opened)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acls_info acls = {};
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return err;

        if (flags & O_CREAT) {
                err = ceph_pre_init_acls(dir, &mode, &acls);
                if (err < 0)
                        return err;
        }

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_acl;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (acls.pagelist) {
                        req->r_pagelist = acls.pagelist;
                        acls.pagelist = NULL;
                }
        }

        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);

        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;

        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_unhashed(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE &&
                    req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &acls);
                        *opened |= FILE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open, opened);
        }
out_req:
        if (!req->r_err && req->r_target_inode)
                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
        ceph_mdsc_put_request(req);
out_acl:
        ceph_release_acls_info(&acls);
        dout("atomic_open result=%d\n", err);
        return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

enum {
        HAVE_RETRIED = 1,
        CHECK_EOF = 2,
        READ_INLINE = 3,
};
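
/*
 * These are the retry_op values the sync read paths hand back to
 * ceph_read_iter(): HAVE_RETRIED means one retry has already been
 * made, CHECK_EOF asks the caller to recheck i_size after a short
 * read, and READ_INLINE redirects the read to inline data fetched
 * from the MDS.
 */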

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
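/*
 * Example (layout values are illustrative): with 4 MB objects, a read
 * of 0~6 MB is first issued against one object; this_len comes back
 * clipped to 4 MB, so hit_stripe is set and the loop below continues
 * with the remaining 4 MB~2 MB against the next object.
 */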
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len, left;
        loff_t i_size;
        int page_align, pages_left;
        int read, ret;
        struct page **page_pos;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;

more:
        page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        i_size = i_size_read(inode);
        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < i_size)) {
                        int zlen = min(this_len - ret, i_size - pos - ret);
                        int zoff = (off & ~PAGE_MASK) + read + ret;
                        dout(" zero gap %llu to %llu\n",
                             pos + ret, pos + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                didpages = (page_align + ret) >> PAGE_SHIFT;
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe and need to continue */
                if (left && hit_stripe && pos < i_size)
                        goto more;
        }

        if (read > 0) {
                ret = read;
                /* did we bounce off eof? */
                if (pos + left > i_size)
                        *checkeof = CHECK_EOF;
        }

        dout("striped_read returns %d\n", ret);
        return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                              int *checkeof)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct page **pages;
        u64 off = iocb->ki_pos;
        int num_pages, ret;
        size_t len = iov_iter_count(i);

        dout("sync_read on file %p %llu~%u %s\n", file, off,
             (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping, off,
                                           off + len);
        if (ret < 0)
                return ret;

        num_pages = calc_pages_for(off, len);
        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
        ret = striped_read(inode, off, len, pages,
                           num_pages, checkeof);
        if (ret > 0) {
                int l, k = 0;
                size_t left = ret;

                while (left) {
                        size_t page_off = off & ~PAGE_MASK;
                        size_t copy = min_t(size_t, left,
                                            PAGE_SIZE - page_off);
                        l = copy_page_to_iter(pages[k++], page_off, copy, i);
                        off += l;
                        left -= l;
                        if (l < copy)
                                break;
                }
        }
        ceph_release_page_vector(pages, num_pages);

        if (off > iocb->ki_pos) {
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %d\n", ret);
        return ret;
}

struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        int write;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec mtime;
        struct ceph_cap_flush *prealloc_cf;
};
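
/*
 * One ceph_aio_request tracks a whole O_DIRECT AIO: each OSD request
 * submitted for it bumps pending_reqs, every completion drops it in
 * ceph_aio_complete(), and the iocb is completed once the count hits
 * zero.  The first failure is latched into ->error via cmpxchg().
 */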

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req,
                                  struct ceph_msg *msg)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        int num_pages = calc_pages_for((u64)osd_data->alignment,
                                       osd_data->length);

        dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
             inode, rc, osd_data->length);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->wb_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && osd_data->length > rc) {
                        int zoff = osd_data->alignment + rc;
                        int zlen = osd_data->length - rc;
                        /*
                         * If the read was satisfied by a single OSD
                         * request, it can extend past EOF.  Otherwise
                         * the read is within i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        if (zlen > 0)
                                ceph_zero_page_vector_range(zoff, zlen,
                                                            osd_data->pages);
                }
        }

        ceph_put_page_vector(osd_data->pages, num_pages, false);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
                                      false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
                       CEPH_OSD_FLAG_ONDISK |
                       CEPH_OSD_FLAG_WRITE;
        req->r_base_oloc = orig_req->r_base_oloc;
        req->r_base_oid = orig_req->r_base_oid;

        req->r_ops[0] = orig_req->r_ops[0];
        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

        ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
                                snapc, CEPH_NOSNAP, &aio_req->mtime);

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req, NULL);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
             unsafe ? "un" : "");
        if (unsafe) {
                ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                spin_lock(&ci->i_unsafe_lock);
                list_add_tail(&req->r_unsafe_item,
                              &ci->i_unsafe_writes);
                spin_unlock(&ci->i_unsafe_lock);
        } else {
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_item);
                spin_unlock(&ci->i_unsafe_lock);
                ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
        }
}
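
/*
 * Note the pairing above: the Fw cap reference taken when the request
 * goes in flight (unsafe == true) is only dropped once the OSD reports
 * the write ONDISK (unsafe == false), which keeps the cap from being
 * released to the MDS while an unsafe write is still outstanding.
 */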

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret;
        struct timespec mtime = current_fs_time(inode->i_sb);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
             (write ? "write" : "read"), file, pos, (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        if (write) {
                ret = invalidate_inode_pages2_range(inode->i_mapping,
                                                    pos >> PAGE_SHIFT,
                                                    (pos + count) >> PAGE_SHIFT);
                if (ret < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret);

                flags = CEPH_OSD_FLAG_ORDERSNAP |
                        CEPH_OSD_FLAG_ONDISK |
                        CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = dio_get_pagev_size(iter);
                size_t start = 0;
                ssize_t len;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            /* include a 'startsync' command */
                                            write ? 2 : 1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = size;
                pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                /*
                 * To simplify error handling, allow AIO only when the
                 * IO is entirely within i_size or can be satisfied by
                 * a single OSD request.
                 */
                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
                    (len == count || pos + count <= i_size_read(inode))) {
                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
                        if (aio_req) {
                                aio_req->iocb = iocb;
                                aio_req->write = write;
                                INIT_LIST_HEAD(&aio_req->osd_reqs);
                                if (write) {
                                        aio_req->mtime = mtime;
                                        swap(aio_req->prealloc_cf, *pcf);
                                }
                        }
                        /* ignore error */
                }

                if (write) {
                        /*
                         * throw out any page cache pages in this range.  this
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
                                                   (pos + len) | (PAGE_SIZE - 1));

                        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
                                                 false, false);

                ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

                if (aio_req) {
                        aio_req->total_len += len;
                        aio_req->num_reqs++;
                        atomic_inc(&aio_req->pending_reqs);

                        req->r_callback = ceph_aio_complete_req;
                        req->r_inode = inode;
                        req->r_priv = aio_req;
                        list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

                        pos += len;
                        iov_iter_advance(iter, len);
                        continue;
                }

                ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                size = i_size_read(inode);
                if (!write) {
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);
                                ceph_zero_page_vector_range(start + ret, zlen,
                                                            pages);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }

                ceph_put_page_vector(pages, num_pages, false);

                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;

                pos += len;
                iov_iter_advance(iter, len);

                if (!write && pos >= size)
                        break;

                if (write && pos > size) {
                        if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (aio_req) {
                if (aio_req->num_reqs == 0) {
                        kfree(aio_req);
                        return ret;
                }

                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
                                              CEPH_CAP_FILE_RD);

                while (!list_empty(&aio_req->osd_reqs)) {
                        req = list_first_entry(&aio_req->osd_reqs,
                                               struct ceph_osd_request,
                                               r_unsafe_item);
                        list_del_init(&req->r_unsafe_item);
                        if (ret >= 0)
                                ret = ceph_osdc_start_request(req->r_osdc,
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
                                ceph_aio_complete_req(req, NULL);
                        }
                }
                return -EIOCBQUEUED;
        }

        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
                ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int check_caps = 0;
        int ret;
        struct timespec mtime = current_fs_time(inode->i_sb);
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_SHIFT,
                                            (pos + count) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE |
                CEPH_OSD_FLAG_ACK;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                /* get a second commit callback */
                req->r_unsafe_callback = ceph_sync_write_unsafe;
                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                 false, true);

                /* BUG_ON(vino.snap != CEPH_NOSNAP); */
                ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
                ceph_osdc_put_request(req);
                if (ret == 0) {
                        pos += len;
                        written += len;

                        if (pos > i_size_read(inode)) {
                                check_caps = ceph_inode_set_size(inode, pos);
                                if (check_caps)
                                        ceph_check_caps(ceph_inode(inode),
                                                        CHECK_CAPS_AUTHONLY,
                                                        NULL);
                        }
                } else
                        break;
        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos;
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
        if (ret < 0)
                return ret;

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
                                ret = ceph_sync_read(iocb, to, &retry_op);
                        }
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                ret = generic_file_read_iter(iocb, to);
        }
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
                put_page(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos, i_size);

                        read += ret;
                        len -= ret;
                        retry_op = HAVE_RETRIED;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write... _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        loff_t pos;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);

        if (iocb->ki_flags & IOCB_APPEND) {
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (err < 0)
                        goto out;
        }

        err = generic_write_checks(iocb, from);
        if (err <= 0)
                goto out;

        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        err = file_remove_privs(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                err = ceph_uninline_data(file, NULL);
                if (err < 0)
                        goto out;
        }

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }

        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, count, i_size_read(inode));
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        got = 0;
        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
                            &got, NULL);
        if (err < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                struct ceph_snap_context *snapc;
                struct iov_iter data;
                inode_unlock(inode);

                spin_lock(&ci->i_ceph_lock);
                if (__ceph_have_pending_cap_snap(ci)) {
                        struct ceph_cap_snap *capsnap =
                                list_last_entry(&ci->i_cap_snaps,
                                                struct ceph_cap_snap,
                                                ci_item);
                        snapc = ceph_get_snap_context(capsnap->context);
                } else {
                        BUG_ON(!ci->i_head_snapc);
                        snapc = ceph_get_snap_context(ci->i_head_snapc);
                }
                spin_unlock(&ci->i_ceph_lock);

                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
                        written = ceph_direct_read_write(iocb, &data, snapc,
                                                         &prealloc_cf);
                else
                        written = ceph_sync_write(iocb, &data, pos, snapc);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u "
                             "got EOLDSNAPC, retrying\n",
                             inode, ceph_vinop(inode),
                             pos, (unsigned)count);
                        inode_lock(inode);
                        goto retry_snap;
                }
                if (written > 0)
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
                loff_t old_size = i_size_read(inode);
                /*
                 * No need to acquire the i_truncate_mutex, because
                 * the MDS revokes Fwb caps before sending a truncate
                 * message to us.  We can't get the Fwb cap while there
                 * are pending vmtruncates, so write and vmtruncate
                 * cannot run at the same time.
                 */
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                if (i_size_read(inode) > old_size)
                        ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }

        if (written >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)count,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (written >= 0 &&
            ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
             ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                err = vfs_fsync_range(file, pos, pos + written - 1, 1);
                if (err < 0)
                        written = err;
        }

        goto out_unlocked;

out:
        inode_unlock(inode);
out_unlocked:
        ceph_free_cap_flush(prealloc_cf);
        current->backing_dev_info = NULL;
        return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t i_size;
        int ret;

        inode_lock(inode);

        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        i_size = i_size_read(inode);
        switch (whence) {
        case SEEK_END:
                offset += i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it.
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
                if (offset >= i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = i_size;
                break;
        }

        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
        inode_unlock(inode);
        return offset;
}

static inline void ceph_zero_partial_page(
        struct inode *inode, loff_t offset, unsigned size)
{
        struct page *page;
        pgoff_t index = offset >> PAGE_SHIFT;

        page = find_lock_page(inode->i_mapping, index);
        if (page) {
                wait_on_page_writeback(page);
                zero_user(page, offset & (PAGE_SIZE - 1), size);
                unlock_page(page);
                put_page(page);
        }
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
{
        loff_t nearly = round_up(offset, PAGE_SIZE);
        if (offset < nearly) {
                loff_t size = nearly - offset;
                if (length < size)
                        size = length;
                ceph_zero_partial_page(inode, offset, size);
                offset += size;
                length -= size;
        }
        if (length >= PAGE_SIZE) {
                loff_t size = round_down(length, PAGE_SIZE);
                truncate_pagecache_range(inode, offset, offset + size - 1);
                offset += size;
                length -= size;
        }
        if (length)
                ceph_zero_partial_page(inode, offset, length);
}
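
/*
 * Example with 4 KB pages: zeroing offset 5000, length 12000 first
 * zeroes the 3192 byte tail of the page containing offset 5000, then
 * truncates the two whole pages that follow (8192..16383), and
 * finally zeroes the 616 byte head of the page at 16384.
 */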

static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        int ret = 0;
        loff_t zero = 0;
        int op;

        if (!length) {
                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
                length = &zero;
        } else {
                op = CEPH_OSD_OP_ZERO;
        }

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode),
                                    offset, length,
                                    0, 1, op,
                                    CEPH_OSD_FLAG_WRITE |
                                    CEPH_OSD_FLAG_ONDISK,
                                    NULL, 0, 0, false);
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
                goto out;
        }

        ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
                                &inode->i_mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret == -ENOENT)
                        ret = 0;
        }
        ceph_osdc_put_request(req);

out:
        return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
        s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        s32 object_size = ceph_file_layout_object_size(ci->i_layout);
        u64 object_set_size = object_size * stripe_count;
        u64 nearly, t;

        /* round offset up to next period boundary */
        nearly = offset + object_set_size - 1;
        t = nearly;
        nearly -= do_div(t, object_set_size);

        while (length && offset < nearly) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        while (length >= object_set_size) {
                int i;
                loff_t pos = offset;
                for (i = 0; i < stripe_count; ++i) {
                        ret = ceph_zero_partial_object(inode, pos, NULL);
                        if (ret < 0)
                                return ret;
                        pos += stripe_unit;
                }
                offset += object_set_size;
                length -= object_set_size;
        }
        while (length) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        return ret;
}
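
/*
 * Example (illustrative layout): with object_size = 4 MB and
 * stripe_count = 2, one period (object_set_size) covers 8 MB.
 * Punching 2 MB~14 MB first zeroes partial objects up to the period
 * boundary at 8 MB, then handles 8 MB..16 MB as one full period by
 * calling ceph_zero_partial_object() with a NULL length on each of
 * its two objects, leaving no tail for the final loop.
 */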

static long ceph_fallocate(struct file *file, int mode,
                           loff_t offset, loff_t length)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        int want, got = 0;
        int dirty;
        int ret = 0;
        loff_t endoff = 0;
        loff_t size;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        if (ceph_snap(inode) != CEPH_NOSNAP) {
                ret = -EROFS;
                goto unlock;
        }

        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
            !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                ret = ceph_uninline_data(file, NULL);
                if (ret < 0)
                        goto unlock;
        }

        size = i_size_read(inode);
        if (!(mode & FALLOC_FL_KEEP_SIZE))
                endoff = offset + length;

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;

        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
        if (ret < 0)
                goto unlock;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (offset < size)
                        ceph_zero_pagecache_range(inode, offset, length);
                ret = ceph_zero_objects(inode, offset, length);
        } else if (endoff > size) {
                truncate_pagecache_range(inode, size, -1);
                if (ceph_inode_set_size(inode, endoff))
                        ceph_check_caps(ceph_inode(inode),
                                        CHECK_CAPS_AUTHONLY, NULL);
        }

        if (!ret) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, got);
unlock:
        inode_unlock(inode);
        ceph_free_cap_flush(prealloc_cf);
        return ret;
}

const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read_iter = ceph_read_iter,
        .write_iter = ceph_write_iter,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
        .fallocate = ceph_fallocate,
};

// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}
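
/*
 * Translation example: an open with O_RDWR|O_CREAT|O_TRUNC becomes
 * CEPH_O_RDWR|CEPH_O_CREAT|CEPH_O_TRUNC on the wire, so host-specific
 * open flag values never leak into the protocol.
 */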
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                iov_iter_advance(iter, bytes);
                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}
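
/*
 * put_bvecs() is the counterpart of iter_get_bvecs_alloc(): it drops
 * the page references taken there, marking the pages dirty first when
 * the caller asks for it, and frees the array.  The bv_page NULL check
 * works because the array is allocated with __GFP_ZERO and may be only
 * partially filled.
 */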

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
                               int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
             inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi) {
                        ceph_put_fmode(ci, fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi) {
                        ceph_put_fmode(ci, fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = fi;
        }

        fi->fmode = fmode;
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->meta_err = errseq_sample(&ci->i_meta_err);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_register_inode_cookie(inode);
                ceph_fscache_file_set_cookie(inode, file);
                /* fall through */
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                          S_ISDIR(inode->i_mode));
                if (ret)
                        return ret;
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_fmode = -1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}
432
433
434/*
435 * Do a lookup + open with a single request. If we get a non-existent
436 * file or symlink, return 1 so the VFS can retry.
437 */
438int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
439 struct file *file, unsigned flags, umode_t mode)
440{
441 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
442 struct ceph_mds_client *mdsc = fsc->mdsc;
443 struct ceph_mds_request *req;
444 struct dentry *dn;
445 struct ceph_acl_sec_ctx as_ctx = {};
446 int mask;
447 int err;
448
449 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
450 dir, dentry, dentry,
451 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
452
453 if (dentry->d_name.len > NAME_MAX)
454 return -ENAMETOOLONG;
455
456 if (flags & O_CREAT) {
457 if (ceph_quota_is_max_files_exceeded(dir))
458 return -EDQUOT;
459 err = ceph_pre_init_acls(dir, &mode, &as_ctx);
460 if (err < 0)
461 return err;
462 err = ceph_security_init_secctx(dentry, mode, &as_ctx);
463 if (err < 0)
464 goto out_ctx;
465 } else if (!d_in_lookup(dentry)) {
466 /* If it's not being looked up, it's negative */
467 return -ENOENT;
468 }
469
470 /* do the open */
471 req = prepare_open_request(dir->i_sb, flags, mode);
472 if (IS_ERR(req)) {
473 err = PTR_ERR(req);
474 goto out_ctx;
475 }
476 req->r_dentry = dget(dentry);
477 req->r_num_caps = 2;
478 if (flags & O_CREAT) {
479 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
480 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
481 if (as_ctx.pagelist) {
482 req->r_pagelist = as_ctx.pagelist;
483 as_ctx.pagelist = NULL;
484 }
485 }
486
487 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
488 if (ceph_security_xattr_wanted(dir))
489 mask |= CEPH_CAP_XATTR_SHARED;
490 req->r_args.open.mask = cpu_to_le32(mask);
491
492 req->r_parent = dir;
493 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
494 err = ceph_mdsc_do_request(mdsc,
495 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
496 req);
497 err = ceph_handle_snapdir(req, dentry, err);
498 if (err)
499 goto out_req;
500
501 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
502 err = ceph_handle_notrace_create(dir, dentry);
503
504 if (d_in_lookup(dentry)) {
505 dn = ceph_finish_lookup(req, dentry, err);
506 if (IS_ERR(dn))
507 err = PTR_ERR(dn);
508 } else {
509 /* we were given a hashed negative dentry */
510 dn = NULL;
511 }
512 if (err)
513 goto out_req;
514 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
515 /* make vfs retry on splice, ENOENT, or symlink */
516 dout("atomic_open finish_no_open on dn %p\n", dn);
517 err = finish_no_open(file, dn);
518 } else {
519 dout("atomic_open finish_open on dn %p\n", dn);
520 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
521 ceph_init_inode_acls(d_inode(dentry), &as_ctx);
522 file->f_mode |= FMODE_CREATED;
523 }
524 err = finish_open(file, dentry, ceph_open);
525 }
526out_req:
527 if (!req->r_err && req->r_target_inode)
528 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
529 ceph_mdsc_put_request(req);
530out_ctx:
531 ceph_release_acl_sec_ctx(&as_ctx);
532 dout("atomic_open result=%d\n", err);
533 return err;
534}
535
536int ceph_release(struct inode *inode, struct file *file)
537{
538 struct ceph_inode_info *ci = ceph_inode(inode);
539
540 if (S_ISDIR(inode->i_mode)) {
541 struct ceph_dir_file_info *dfi = file->private_data;
542 dout("release inode %p dir file %p\n", inode, file);
543 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
544
545 ceph_put_fmode(ci, dfi->file_info.fmode);
546
547 if (dfi->last_readdir)
548 ceph_mdsc_put_request(dfi->last_readdir);
549 kfree(dfi->last_name);
550 kfree(dfi->dir_info);
551 kmem_cache_free(ceph_dir_file_cachep, dfi);
552 } else {
553 struct ceph_file_info *fi = file->private_data;
554 dout("release inode %p regular file %p\n", inode, file);
555 WARN_ON(!list_empty(&fi->rw_contexts));
556
557 ceph_put_fmode(ci, fi->fmode);
558 kmem_cache_free(ceph_file_cachep, fi);
559 }
560
561 /* wake up anyone waiting for caps on this inode */
562 wake_up_all(&ci->i_cap_wq);
563 return 0;
564}
565
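/*
 * retry_op values threaded through ceph_read_iter()/ceph_sync_read():
 * HAVE_RETRIED marks that a short read has already been retried once,
 * CHECK_EOF asks the caller to re-check i_size (a short read may mean
 * a hole or a racing truncate), and READ_INLINE redirects the read to
 * inline data fetched from the MDS.
 */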
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Completely synchronous read and write methods. Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads. (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * Flush any page cache pages in this range. This will make
	 * concurrent normal and sync IO slow, but it will at least
	 * behave sensibly when they are in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		u64 i_size;
		bool more;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					    ci->i_vino, off, &len, 0, 1,
					    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					    NULL, ci->i_truncate_seq,
					    ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

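		/*
		 * A pipe-backed iter (splice) supplies its own pages, so
		 * read directly into those instead of allocating a bounce
		 * page vector and copying.
		 */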
		if (unlikely(iov_iter_is_pipe(to))) {
			ret = iov_iter_get_pages_alloc(to, &pages, len,
						       &page_off);
			if (ret <= 0) {
				ceph_osdc_put_request(req);
				/* keep the iter's error, if any */
				if (!ret)
					ret = -ENOMEM;
				break;
			}
			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
			if (ret < len) {
				len = ret;
				osd_req_op_extent_update(req, 0, len);
				more = false;
			}
		} else {
			num_pages = calc_pages_for(off, len);
			page_off = off & ~PAGE_MASK;
			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
			if (IS_ERR(pages)) {
				ceph_osdc_put_request(req);
				ret = PTR_ERR(pages);
				break;
			}
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);
		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		if (unlikely(iov_iter_is_pipe(to))) {
			if (ret > 0) {
				iov_iter_advance(to, ret);
				off += ret;
			} else {
				iov_iter_advance(to, 0);
			}
			ceph_put_page_vector(pages, num_pages, false);
		} else {
			int idx = 0;
			size_t left = ret > 0 ? ret : 0;
			while (left > 0) {
				size_t len, copied;
				page_off = off & ~PAGE_MASK;
				len = min_t(size_t, left, PAGE_SIZE - page_off);
				copied = copy_page_to_iter(pages[idx++],
							   page_off, len, to);
				off += copied;
				left -= copied;
				if (copied < len) {
					ret = -EFAULT;
					break;
				}
			}
			ceph_release_page_vector(pages, num_pages);
		}

		if (ret < 0) {
			if (ret == -EBLACKLISTED)
				fsc->blacklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (ret >= 0 &&
		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
			*retry_op = CHECK_EOF;
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

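/*
 * State shared by all the OSD requests making up a single direct-IO
 * kiocb; the last request to complete finishes the iocb via
 * ceph_aio_complete().
 */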
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If the read is satisfied by a single OSD
			 * request, it may extend past EOF. Otherwise
			 * the read is within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

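/*
 * Retry a write that failed with -EOLDSNAPC from workqueue context:
 * rebuild the request against the most recent snap context and
 * resubmit it.
 */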
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

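/*
 * O_DIRECT read/write: split the iter into per-object OSD requests.
 * If the kiocb is async and the IO either fits in a single request or
 * stays within i_size, the requests are queued and the iocb completes
 * later via ceph_aio_complete_req(); otherwise each request is waited
 * on synchronously.
 */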
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the IO
		 * is within i_size or can be satisfied by a single OSD
		 * request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * Throw out any page cache pages in this range;
			 * this may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes. (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * Write from the beginning of the first page, regardless
		 * of IO alignment: the data is staged through a freshly
		 * allocated page vector rather than the caller's pages.
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (ret < 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to the MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC. In that case, retry the write, but only
 * _after_ dropping our cap refs and allowing the pending snap to
 * logically complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	inode_inc_iversion_raw(inode);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

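		/*
		 * Tag the write with the correct snap context: the most
		 * recent pending cap snap if there is one, otherwise the
		 * live head context.
		 */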
		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT) {
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
			ceph_end_io_direct(inode);
		} else {
			written = ceph_sync_write(iocb, &data, pos, snapc);
			ceph_end_io_write(inode);
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we can't get the Fwb cap while there is
		 * a pending vmtruncate. So write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out:
	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_write(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek. Be sure to verify the file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
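		/*
		 * Holes are not tracked, so report the whole file as
		 * data with a single virtual hole at EOF.
		 */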
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

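/*
 * Zero a range object by object: first any partial stripe period up
 * to the next period boundary, then whole periods (one truncate,
 * delete or zero op per object), then the remaining tail.
 */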
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

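	/*
	 * Only hole punching that keeps the file size
	 * (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) is supported;
	 * plain preallocation is not.
	 */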
	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci. Two attempts are made to obtain both caps, and an error is returned
 * if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got, NULL);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do a
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
		if (ret < 0)
			return ret;
		/*... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}

static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}

/*
 * This function does several size-related checks, returning an error if:
 * - source file is smaller than off+len
 * - destination file size is not OK (inode_newsize_ok())
 * - the max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF. Instead of simply setting length
	 * to (size - src_off), just drop to the VFS default implementation,
	 * as the local i_size may be stale due to other clients writing to
	 * the source inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}

static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	loff_t endoff = 0, size;
	ssize_t ret = -EIO;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen, object_size;
	int src_got = 0, dst_got = 0, err, dirty;
	bool do_final_copy = false;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation. This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies. Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches. And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	size = i_size_read(dst_inode);
	endoff = dst_off + len;

	/* Drop dst file cached pages */
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    endoff >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);

	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at src_off.
	 */
	if (src_objoff) {
		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		if (ret < 0) {
			dout("do_splice_direct returned %zd\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}
	object_size = src_ci->i_layout.object_size;
	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
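		/* RADOS object names are "<ino hex>.<objnum as 8 hex digits>" */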
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		err = ceph_osdc_copy_from(
			&src_fsc->client->osdc,
			src_ci->i_vino.snap, 0,
			&src_oid, &src_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
			&dst_oid, &dst_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
		if (err) {
			dout("ceph_osdc_copy_from returned %d\n", err);
			if (!ret)
				ret = err;
			goto out_caps;
		}
		len -= object_size;
		src_off += object_size;
		dst_off += object_size;
		ret += object_size;
	}

	if (len)
		/* We still need one final local copy */
		do_final_copy = true;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (endoff > size) {
		int caps_flags = 0;

		/* Let the MDS know about dst file size change */
		if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_NODELAY;
		if (ceph_inode_set_size(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_AUTHONLY;
		if (caps_flags)
			ceph_check_caps(dst_ci, caps_flags, NULL);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dst_ci->i_inline_version = CEPH_INLINE_NONE;
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	if (do_final_copy) {
		err = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, len, flags);
		if (err < 0) {
			dout("do_splice_direct returned %d\n", err);
			goto out;
		}
		len -= err;
		ret += err;
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}

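/*
 * -EOPNOTSUPP and -EXDEV from the ceph-specific path fall back to the
 * generic (VFS) copy implementation.
 */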
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};