// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
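
/*
 * For example, an open(2) with O_WRONLY|O_CREAT|O_TRUNC is encoded as
 * CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC on the wire; any remaining
 * flag with no CEPH_* counterpart is dropped (and noted via the dout()
 * above).
 */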

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
					    ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
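
/*
 * On success the caller owns the pinned pages and must release them
 * with put_bvecs() below; on error the bio_vec array has already been
 * freed here and no pages remain pinned.
 */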

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
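
/*
 * Opens that may change state (write, create, truncate) are directed
 * at the authoritative MDS for the inode, presumably since only the
 * auth MDS can grant the exclusive caps such an open needs; read-only
 * opens may be handled by any MDS.
 */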

static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	struct ceph_file_info *fi;
	int ret;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
			fi->flags |= CEPH_F_SYNC;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
		ret = ceph_uninline_data(file);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
	ceph_put_fmode(ci, fi->fmode, 1);
	kmem_cache_free(ceph_file_cachep, fi);
	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return ret;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
		       sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}
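
/*
 * On success this returns the cap mask that was taken (Fx and Dc) and
 * fills in *pino with a pre-delegated inode number; the caller stashes
 * the mask in req->r_dir_caps and releases it when the request
 * completes.  A return of 0 means fall back to a synchronous create.
 */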

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

static void wake_async_create_waiters(struct inode *inode,
				      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool check_cap = false;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);

		if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
			check_cap = true;
		}
	}
	ceph_kick_flushing_inode_caps(session, ci);
	spin_unlock(&ci->i_ceph_lock);

	if (check_cap)
		ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	struct dentry *dentry = req->r_dentry;
	struct inode *dinode = d_inode(dentry);
	struct inode *tinode = req->r_target_inode;
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	WARN_ON_ONCE(dinode && tinode && dinode != tinode);

	/* MDS changed -- caller must resubmit */
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		pr_warn("async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		if (dinode) {
			mapping_set_error(dinode->i_mapping, result);
			ceph_inode_shutdown(dinode);
			wake_async_create_waiters(dinode, req->r_session);
		}
	}

	if (tinode) {
		u64 ino = ceph_vino(tinode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);

		mapping_set_error(tinode->i_mapping, result);
		wake_async_create_waiters(tinode, req->r_session);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_string *pool_ns;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	if (req->r_pagelist) {
		iinfo.xattr_len = req->r_pagelist->length;
		iinfo.xattr_data = req->r_pagelist->mapped_tail;
	} else {
		/* fake it */
		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
		iinfo.xattr_data = xattr_buf;
		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
	}

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1); // ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	if (dir->i_mode & S_ISGID) {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

		/* Directories always inherit the setgid bit. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
	}
	in.mode = cpu_to_le32((u32)mode);

	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);
	/* lo is private, so pool_ns can't change */
	pool_ns = rcu_dereference_raw(lo->pool_ns);
	if (pool_ns) {
		iinfo.pool_ns_len = pool_ns->len;
		iinfo.pool_ns_data = pool_ns->str;
	}

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
		     vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}

	spin_lock(&dentry->d_lock);
	di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
	wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
	spin_unlock(&dentry->d_lock);

	return ret;
}
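
/*
 * Note that ceph_finish_async_create() instantiates the new inode from
 * a locally fabricated MDS reply (the "in"/"iinfo" structures above)
 * so the open can complete before the real reply arrives; any failure
 * or inode-number mismatch is reported later by ceph_async_create_cb().
 */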

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_wait_on_conflict_unlink(dentry);
	if (err)
		return err;
	/*
	 * Do not truncate the file, since atomic_open is called before the
	 * permission check. The caller will do the truncation afterward.
	 */
	flags &= ~O_TRUNC;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
		/* Async create can't handle more than a page of xattrs */
		if (as_ctx.pagelist &&
		    !list_is_singular(&as_ctx.pagelist->head))
			try_async = false;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			struct ceph_dentry_info *di = ceph_dentry(dentry);

			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;

			spin_lock(&dentry->d_lock);
			di->flags |= CEPH_DENTRY_ASYNC_CREATE;
			spin_unlock(&dentry->d_lock);

			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							       file, mode, req,
							       &as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
				goto retry;
			}
			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};
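
/*
 * retry_op values used by ceph_read_iter(): CHECK_EOF means a short
 * read may need to be retried against a larger i_size, READ_INLINE
 * means the data must be fetched from the MDS inline-data blob, and
 * HAVE_RETRIED marks a read that has already been retried once.
 */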

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size = i_size_read(inode);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					    ci->i_vino, off, &len, 0, 1,
					    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					    NULL, ci->i_truncate_seq,
					    ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ceph_osdc_start_request(osdc, req);
		ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - iocb->ki_pos;
			iocb->ki_pos = i_size;
		} else {
			ret = off - iocb->ki_pos;
			iocb->ki_pos = off;
		}
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};
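
/*
 * One ceph_aio_request fans out over num_reqs OSD requests;
 * pending_reqs counts those still in flight, error latches the first
 * failure (via the cmpxchg in ceph_aio_complete_req()), and the kiocb
 * is completed only when the last OSD request finishes.
 */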

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
		}

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ceph_osdc_start_request(req->r_osdc, req);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
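
/*
 * -EOLDSNAPC handling above: the OSD rejected the write because its
 * snap context was stale, so the request is cloned and resubmitted
 * from workqueue context with the most recent snap context for the
 * inode.
 */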

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && user_backed_iter(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2;

		ceph_fscache_invalidate(inode, true);

		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
						     pos >> PAGE_SHIFT,
						     (pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ceph_osdc_start_request(req->r_osdc, req);
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ceph_osdc_start_request(req->r_osdc, req);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
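
/*
 * In the AIO case the OSD requests are queued above and -EIOCBQUEUED
 * is returned; the final result is delivered later through
 * ki_complete() once ceph_aio_complete() sees the last request finish.
 */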
1471
1472/*
1473 * Synchronous write, straight from __user pointer or user pages.
1474 *
1475 * If write spans object boundary, just do multiple writes. (For a
1476 * correct atomic write, we should e.g. take write locks on all
1477 * objects, rollback on failure, etc.)
1478 */
1479static ssize_t
1480ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1481 struct ceph_snap_context *snapc)
1482{
1483 struct file *file = iocb->ki_filp;
1484 struct inode *inode = file_inode(file);
1485 struct ceph_inode_info *ci = ceph_inode(inode);
1486 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1487 struct ceph_vino vino;
1488 struct ceph_osd_request *req;
1489 struct page **pages;
1490 u64 len;
1491 int num_pages;
1492 int written = 0;
1493 int flags;
1494 int ret;
1495 bool check_caps = false;
1496 struct timespec64 mtime = current_time(inode);
1497 size_t count = iov_iter_count(from);
1498
1499 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1500 return -EROFS;
1501
1502 dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1503 file, pos, (unsigned)count, snapc, snapc->seq);
1504
1505 ret = filemap_write_and_wait_range(inode->i_mapping,
1506 pos, pos + count - 1);
1507 if (ret < 0)
1508 return ret;
1509
1510 ceph_fscache_invalidate(inode, false);
1511 ret = invalidate_inode_pages2_range(inode->i_mapping,
1512 pos >> PAGE_SHIFT,
1513 (pos + count - 1) >> PAGE_SHIFT);
1514 if (ret < 0)
1515 dout("invalidate_inode_pages2_range returned %d\n", ret);
1516
1517 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1518
1519 while ((len = iov_iter_count(from)) > 0) {
1520 size_t left;
1521 int n;
1522
1523 vino = ceph_vino(inode);
1524 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1525 vino, pos, &len, 0, 1,
1526 CEPH_OSD_OP_WRITE, flags, snapc,
1527 ci->i_truncate_seq,
1528 ci->i_truncate_size,
1529 false);
1530 if (IS_ERR(req)) {
1531 ret = PTR_ERR(req);
1532 break;
1533 }
1534
1535 /*
1536 * write from beginning of first page,
1537 * regardless of io alignment
1538 */
1539 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1540
1541 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1542 if (IS_ERR(pages)) {
1543 ret = PTR_ERR(pages);
1544 goto out;
1545 }
1546
1547 left = len;
1548 for (n = 0; n < num_pages; n++) {
1549 size_t plen = min_t(size_t, left, PAGE_SIZE);
1550 ret = copy_page_from_iter(pages[n], 0, plen, from);
1551 if (ret != plen) {
1552 ret = -EFAULT;
1553 break;
1554 }
1555 left -= ret;
1556 }
1557
1558 if (ret < 0) {
1559 ceph_release_page_vector(pages, num_pages);
1560 goto out;
1561 }
1562
1563 req->r_inode = inode;
1564
1565 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1566 false, true);
1567
1568 req->r_mtime = mtime;
1569 ceph_osdc_start_request(&fsc->client->osdc, req);
1570 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1571
1572 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1573 req->r_end_latency, len, ret);
1574out:
1575 ceph_osdc_put_request(req);
1576 if (ret != 0) {
1577 ceph_set_error_write(ci);
1578 break;
1579 }
1580
1581 ceph_clear_error_write(ci);
1582 pos += len;
1583 written += len;
1584 if (pos > i_size_read(inode)) {
1585 check_caps = ceph_inode_set_size(inode, pos);
1586 if (check_caps)
1587 ceph_check_caps(ceph_inode(inode),
1588 CHECK_CAPS_AUTHONLY);
1589 }
1590
1591 }
1592
1593 if (ret != -EOLDSNAPC && written > 0) {
1594 ret = written;
1595 iocb->ki_pos = pos;
1596 }
1597 return ret;
1598}
1599
1600/*
1601 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1602 * Atomically grab references, so that those bits are not released
1603 * back to the MDS mid-read.
1604 *
1605 * Hmm, the sync read case isn't actually async... should it be?
1606 */
1607static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1608{
1609 struct file *filp = iocb->ki_filp;
1610 struct ceph_file_info *fi = filp->private_data;
1611 size_t len = iov_iter_count(to);
1612 struct inode *inode = file_inode(filp);
1613 struct ceph_inode_info *ci = ceph_inode(inode);
1614 bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1615 ssize_t ret;
1616 int want = 0, got = 0;
1617 int retry_op = 0, read = 0;
1618
1619again:
1620 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1621 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1622
1623 if (ceph_inode_is_shutdown(inode))
1624 return -ESTALE;
1625
1626 if (direct_lock)
1627 ceph_start_io_direct(inode);
1628 else
1629 ceph_start_io_read(inode);
1630
1631 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1632 want |= CEPH_CAP_FILE_CACHE;
1633 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1634 want |= CEPH_CAP_FILE_LAZYIO;
1635
1636 ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1637 if (ret < 0) {
1638 if (direct_lock)
1639 ceph_end_io_direct(inode);
1640 else
1641 ceph_end_io_read(inode);
1642 return ret;
1643 }
1644
1645 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1646 (iocb->ki_flags & IOCB_DIRECT) ||
1647 (fi->flags & CEPH_F_SYNC)) {
1648
1649 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1650 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1651 ceph_cap_string(got));
1652
1653 if (!ceph_has_inline_data(ci)) {
1654 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1655 ret = ceph_direct_read_write(iocb, to,
1656 NULL, NULL);
1657 if (ret >= 0 && ret < len)
1658 retry_op = CHECK_EOF;
1659 } else {
1660 ret = ceph_sync_read(iocb, to, &retry_op);
1661 }
1662 } else {
1663 retry_op = READ_INLINE;
1664 }
1665 } else {
1666 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1667 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1668 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1669 ceph_cap_string(got));
1670 ceph_add_rw_context(fi, &rw_ctx);
1671 ret = generic_file_read_iter(iocb, to);
1672 ceph_del_rw_context(fi, &rw_ctx);
1673 }
1674
1675 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1676 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1677 ceph_put_cap_refs(ci, got);
1678
1679 if (direct_lock)
1680 ceph_end_io_direct(inode);
1681 else
1682 ceph_end_io_read(inode);
1683
1684 if (retry_op > HAVE_RETRIED && ret >= 0) {
1685 int statret;
1686 struct page *page = NULL;
1687 loff_t i_size;
1688 if (retry_op == READ_INLINE) {
1689 page = __page_cache_alloc(GFP_KERNEL);
1690 if (!page)
1691 return -ENOMEM;
1692 }
1693
1694 statret = __ceph_do_getattr(inode, page,
1695 CEPH_STAT_CAP_INLINE_DATA, !!page);
1696 if (statret < 0) {
1697 if (page)
1698 __free_page(page);
1699 if (statret == -ENODATA) {
1700 BUG_ON(retry_op != READ_INLINE);
1701 goto again;
1702 }
1703 return statret;
1704 }
1705
1706 i_size = i_size_read(inode);
1707 if (retry_op == READ_INLINE) {
1708 BUG_ON(ret > 0 || read > 0);
1709 if (iocb->ki_pos < i_size &&
1710 iocb->ki_pos < PAGE_SIZE) {
1711 loff_t end = min_t(loff_t, i_size,
1712 iocb->ki_pos + len);
1713 end = min_t(loff_t, end, PAGE_SIZE);
1714 if (statret < end)
1715 zero_user_segment(page, statret, end);
1716 ret = copy_page_to_iter(page,
1717 iocb->ki_pos & ~PAGE_MASK,
1718 end - iocb->ki_pos, to);
1719 iocb->ki_pos += ret;
1720 read += ret;
1721 }
1722 if (iocb->ki_pos < i_size && read < len) {
1723 size_t zlen = min_t(size_t, len - read,
1724 i_size - iocb->ki_pos);
1725 ret = iov_iter_zero(zlen, to);
1726 iocb->ki_pos += ret;
1727 read += ret;
1728 }
1729 __free_pages(page, 0);
1730 return read;
1731 }
1732
1733 /* hit EOF or hole? */
1734 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1735 ret < len) {
1736 dout("sync_read hit hole, ppos %lld < size %lld"
1737 ", reading more\n", iocb->ki_pos, i_size);
1738
1739 read += ret;
1740 len -= ret;
1741 retry_op = HAVE_RETRIED;
1742 goto again;
1743 }
1744 }
1745
1746 if (ret >= 0)
1747 ret += read;
1748
1749 return ret;
1750}
1751
1752/*
1753 * Take cap references to avoid releasing caps to MDS mid-write.
1754 *
1755 * If we are synchronous, and write with an old snap context, the OSD
1756 * may return EOLDSNAPC. In that case, retry the write.. _after_
1757 * dropping our cap refs and allowing the pending snap to logically
1758 * complete _before_ this write occurs.
1759 *
1760 * If we are near ENOSPC, write synchronously.
1761 */
1762static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1763{
1764 struct file *file = iocb->ki_filp;
1765 struct ceph_file_info *fi = file->private_data;
1766 struct inode *inode = file_inode(file);
1767 struct ceph_inode_info *ci = ceph_inode(inode);
1768 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1769 struct ceph_osd_client *osdc = &fsc->client->osdc;
1770 struct ceph_cap_flush *prealloc_cf;
1771 ssize_t count, written = 0;
1772 int err, want = 0, got;
1773 bool direct_lock = false;
1774 u32 map_flags;
1775 u64 pool_flags;
1776 loff_t pos;
1777 loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1778
1779 if (ceph_inode_is_shutdown(inode))
1780 return -ESTALE;
1781
1782 if (ceph_snap(inode) != CEPH_NOSNAP)
1783 return -EROFS;
1784
1785 prealloc_cf = ceph_alloc_cap_flush();
1786 if (!prealloc_cf)
1787 return -ENOMEM;
1788
1789 if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1790 direct_lock = true;
1791
1792retry_snap:
1793 if (direct_lock)
1794 ceph_start_io_direct(inode);
1795 else
1796 ceph_start_io_write(inode);
1797
1798 /* We can write back this queue in page reclaim */
1799 current->backing_dev_info = inode_to_bdi(inode);
1800
1801 if (iocb->ki_flags & IOCB_APPEND) {
1802 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1803 if (err < 0)
1804 goto out;
1805 }
1806
1807 err = generic_write_checks(iocb, from);
1808 if (err <= 0)
1809 goto out;
1810
1811 pos = iocb->ki_pos;
1812 if (unlikely(pos >= limit)) {
1813 err = -EFBIG;
1814 goto out;
1815 } else {
1816 iov_iter_truncate(from, limit - pos);
1817 }
1818
1819 count = iov_iter_count(from);
1820 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1821 err = -EDQUOT;
1822 goto out;
1823 }
1824
1825 down_read(&osdc->lock);
1826 map_flags = osdc->osdmap->flags;
1827 pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1828 up_read(&osdc->lock);
1829 if ((map_flags & CEPH_OSDMAP_FULL) ||
1830 (pool_flags & CEPH_POOL_FLAG_FULL)) {
1831 err = -ENOSPC;
1832 goto out;
1833 }
1834
1835 err = file_remove_privs(file);
1836 if (err)
1837 goto out;
1838
1839 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1840 inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1841 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1842 want |= CEPH_CAP_FILE_BUFFER;
1843 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1844 want |= CEPH_CAP_FILE_LAZYIO;
1845 got = 0;
1846 err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1847 if (err < 0)
1848 goto out;
1849
1850 err = file_update_time(file);
1851 if (err)
1852 goto out_caps;
1853
1854 inode_inc_iversion_raw(inode);
1855
1856 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1857 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1858
1859 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1860 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1861 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1862 struct ceph_snap_context *snapc;
1863 struct iov_iter data;
1864
1865 spin_lock(&ci->i_ceph_lock);
1866 if (__ceph_have_pending_cap_snap(ci)) {
1867 struct ceph_cap_snap *capsnap =
1868 list_last_entry(&ci->i_cap_snaps,
1869 struct ceph_cap_snap,
1870 ci_item);
1871 snapc = ceph_get_snap_context(capsnap->context);
1872 } else {
1873 BUG_ON(!ci->i_head_snapc);
1874 snapc = ceph_get_snap_context(ci->i_head_snapc);
1875 }
1876 spin_unlock(&ci->i_ceph_lock);
1877
1878 /* we might need to revert back to that point */
1879 data = *from;
1880 if (iocb->ki_flags & IOCB_DIRECT)
1881 written = ceph_direct_read_write(iocb, &data, snapc,
1882 &prealloc_cf);
1883 else
1884 written = ceph_sync_write(iocb, &data, pos, snapc);
1885 if (direct_lock)
1886 ceph_end_io_direct(inode);
1887 else
1888 ceph_end_io_write(inode);
1889 if (written > 0)
1890 iov_iter_advance(from, written);
1891 ceph_put_snap_context(snapc);
1892 } else {
1893 /*
1894 * No need to acquire the i_truncate_mutex. Because
1895 * the MDS revokes Fwb caps before sending truncate
1896 * message to us. We can't get Fwb cap while there
1897 * are pending vmtruncate. So write and vmtruncate
1898 * can not run at the same time
1899 */
1900 written = generic_perform_write(iocb, from);
1901 if (likely(written >= 0))
1902 iocb->ki_pos = pos + written;
1903 ceph_end_io_write(inode);
1904 }
1905
1906 if (written >= 0) {
1907 int dirty;
1908
1909 spin_lock(&ci->i_ceph_lock);
1910 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1911 &prealloc_cf);
1912 spin_unlock(&ci->i_ceph_lock);
1913 if (dirty)
1914 __mark_inode_dirty(inode, dirty);
1915 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1916 ceph_check_caps(ci, CHECK_CAPS_FLUSH);
1917 }
1918
1919 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1920 inode, ceph_vinop(inode), pos, (unsigned)count,
1921 ceph_cap_string(got));
1922 ceph_put_cap_refs(ci, got);
1923
1924 if (written == -EOLDSNAPC) {
1925 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
1926 inode, ceph_vinop(inode), pos, (unsigned)count);
1927 goto retry_snap;
1928 }
1929
1930 if (written >= 0) {
1931 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1932 (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1933 iocb->ki_flags |= IOCB_DSYNC;
1934 written = generic_write_sync(iocb, written);
1935 }
1936
1937 goto out_unlocked;
1938out_caps:
1939 ceph_put_cap_refs(ci, got);
1940out:
1941 if (direct_lock)
1942 ceph_end_io_direct(inode);
1943 else
1944 ceph_end_io_write(inode);
1945out_unlocked:
1946 ceph_free_cap_flush(prealloc_cf);
1947 current->backing_dev_info = NULL;
1948 return written ? written : err;
1949}
1950
1951/*
1952 * llseek. be sure to verify file size on SEEK_END.
1953 */
1954static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1955{
1956 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1957 struct inode *inode = file_inode(file);
1958 int ret;
1959
1960 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1961 if (ret < 0)
1962 return ret;
1963 }
1964 return generic_file_llseek(file, offset, whence);
1965}
1966
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

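/*
 * Zero out [offset, offset+length) of the page cache in three steps:
 * a partial page at the front, whole pages in the middle (which can
 * simply be dropped via truncate_pagecache_range()), and a partial
 * page at the end.
 */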
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

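/*
 * Zero a byte range of a single RADOS object. A NULL @length affects
 * the whole object via a delete or truncate op; otherwise
 * CEPH_OSD_OP_ZERO punches just the given range. -ENOENT is ignored:
 * an object that doesn't exist reads back as zeroes anyway.
 */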
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ceph_osdc_start_request(&fsc->client->osdc, req);
	ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
	if (ret == -ENOENT)
		ret = 0;
	ceph_osdc_put_request(req);

out:
	return ret;
}

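/*
 * Zero [offset, offset+length) across the striped layout. One period
 * ("object set") spans stripe_count objects and covers object_set_size
 * bytes of file data. Partial leading/trailing periods are zeroed
 * range by range; each fully covered period gets one whole-object
 * operation per stripe.
 *
 * Worked example with a hypothetical layout of stripe_unit = 1M,
 * stripe_count = 4, object_size = 4M (so object_set_size = 16M):
 * zeroing 40M starting at offset 4M does a 12M partial pass up to the
 * 16M period boundary, one full 16M period pass, then a final 12M
 * partial pass.
 */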
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

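/*
 * fallocate: only FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE on
 * regular files is supported. Zero the page cache first and then the
 * backing objects, holding CEPH_CAP_FILE_WR and the invalidate lock
 * so racing faults can't repopulate the hole with stale data.
 */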
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
	if (ret < 0)
		goto unlock;

	filemap_invalidate_lock(inode->i_mapping);
	ceph_fscache_invalidate(inode, false);
	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
	filemap_invalidate_unlock(inode->i_mapping);

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci. Two attempts are made to obtain both caps, and an error is returned
 * if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got);
		if (ret < 0)
			return ret;
		/* ... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}

static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}

/*
 * This function does several size-related checks, returning an error if:
 * - source file is smaller than off+len
 * - destination file size is not OK (inode_newsize_ok())
 * - the max-bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF. Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}

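/*
 * Build a copy-from2 OSD request that copies one whole object to the
 * destination, carrying the destination's truncate seq/size so the
 * copy doesn't resurrect data past a concurrent truncate. The fadvise
 * flags hint sequential access and that cached copies aren't needed.
 */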
static struct ceph_osd_request *
ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
			    u64 src_snapid,
			    struct ceph_object_id *src_oid,
			    struct ceph_object_locator *src_oloc,
			    struct ceph_object_id *dst_oid,
			    struct ceph_object_locator *dst_oloc,
			    u32 truncate_seq, u64 truncate_size)
{
	struct ceph_osd_request *req;
	int ret;
	u32 src_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
	u32 dst_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
					src_oid, src_oloc,
					src_fadvise_flags,
					dst_fadvise_flags,
					truncate_seq,
					truncate_size,
					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	return req;

out:
	ceph_osdc_put_request(req);
	return ERR_PTR(ret);
}

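/*
 * Copy as many full objects as possible via remote copy-from2
 * requests, advancing *src_off and *dst_off as we go. Returns the
 * number of bytes copied, or a negative error only if nothing was
 * copied at all.
 */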
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
				    struct ceph_inode_info *dst_ci, u64 *dst_off,
				    struct ceph_fs_client *fsc,
				    size_t len, unsigned int flags)
{
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *req;
	size_t bytes = 0;
	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
	u32 src_objlen, dst_objlen;
	u32 object_size = src_ci->i_layout.object_size;
	int ret;

	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
	osdc = &fsc->client->osdc;

	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
						  &src_oid, &src_oloc,
						  &dst_oid, &dst_oloc,
						  dst_ci->i_truncate_seq,
						  dst_ci->i_truncate_size);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
		} else {
			ceph_osdc_start_request(osdc, req);
			ret = ceph_osdc_wait_request(osdc, req);
			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
						     req->r_start_latency,
						     req->r_end_latency,
						     object_size, ret);
			ceph_osdc_put_request(req);
		}
		if (ret) {
			if (ret == -EOPNOTSUPP) {
				fsc->have_copy_from2 = false;
				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
			}
			dout("ceph_osdc_copy_from returned %d\n", ret);
			if (!bytes)
				bytes = ret;
			goto out;
		}
		len -= object_size;
		bytes += object_size;
		*src_off += object_size;
		*dst_off += object_size;
	}

out:
	ceph_oloc_destroy(&src_oloc);
	ceph_oloc_destroy(&dst_oloc);
	return bytes;
}

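/*
 * Offload copy_file_range to the OSDs where possible: flush both
 * files, grab Fr/Fw caps, splice any unaligned leading chunk, issue
 * remote full-object copies, then splice the tail. Returning
 * -EOPNOTSUPP at any point lets the caller fall back to the generic
 * VFS implementation.
 */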
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	loff_t size;
	ssize_t ret = -EIO, bytes;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen;
	int src_got = 0, dst_got = 0, err, dirty;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation. This is
	 * desirable in several cases (for example, when 'len' is smaller than
	 * the size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	if (!src_fsc->have_copy_from2)
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies. Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches. And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	/* Drop dst file cached pages */
	ceph_fscache_invalidate(dst_inode, false);
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    (dst_off + len) >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at src_off.
	 */
	if (src_objoff) {
		dout("Initial partial copy of %u bytes\n", src_objlen);

		/*
		 * We need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		/* Abort on short copies or on error */
		if (ret < src_objlen) {
			dout("Failed partial copy (%zd)\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	size = i_size_read(dst_inode);
	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
				     src_fsc, len, flags);
	if (bytes <= 0) {
		if (!ret)
			ret = bytes;
		goto out_caps;
	}
	dout("Copied %zu bytes out of %zu\n", bytes, len);
	len -= bytes;
	ret += bytes;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (dst_off > size) {
		/* Let the MDS know about dst file size change */
		if (ceph_inode_set_size(dst_inode, dst_off) ||
		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	/*
	 * Do the final manual copy if we still have some bytes left, unless
	 * there were errors in remote object copies (len >= object_size).
	 */
	if (len && (len < src_ci->i_layout.object_size)) {
		dout("Final partial copy of %zu bytes\n", len);
		bytes = do_splice_direct(src_file, &src_off, dst_file,
					 &dst_off, len, flags);
		if (bytes > 0)
			ret += bytes;
		else
			dout("Failed partial copy (%zd)\n", bytes);
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}

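/*
 * copy_file_range entry point: try the object-copy fast path first
 * and fall back to the generic page-cache copy when offload isn't
 * possible (-EOPNOTSUPP) or the copy crosses clusters (-EXDEV).
 */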
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.setlease = simple_nosetlease,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fallocate = ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/ceph/ceph_debug.h>
3#include <linux/ceph/striper.h>
4
5#include <linux/module.h>
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/file.h>
9#include <linux/mount.h>
10#include <linux/namei.h>
11#include <linux/writeback.h>
12#include <linux/falloc.h>
13#include <linux/iversion.h>
14#include <linux/ktime.h>
15#include <linux/splice.h>
16
17#include "super.h"
18#include "mds_client.h"
19#include "cache.h"
20#include "io.h"
21#include "metric.h"
22
23static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags)
24{
25 struct ceph_client *cl = mdsc->fsc->client;
26 u32 wire_flags = 0;
27
28 switch (flags & O_ACCMODE) {
29 case O_RDONLY:
30 wire_flags |= CEPH_O_RDONLY;
31 break;
32 case O_WRONLY:
33 wire_flags |= CEPH_O_WRONLY;
34 break;
35 case O_RDWR:
36 wire_flags |= CEPH_O_RDWR;
37 break;
38 }
39
40 flags &= ~O_ACCMODE;
41
42#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
43
44 ceph_sys2wire(O_CREAT);
45 ceph_sys2wire(O_EXCL);
46 ceph_sys2wire(O_TRUNC);
47 ceph_sys2wire(O_DIRECTORY);
48 ceph_sys2wire(O_NOFOLLOW);
49
50#undef ceph_sys2wire
51
52 if (flags)
53 doutc(cl, "unused open flags: %x\n", flags);
54
55 return cpu_to_le32(wire_flags);
56}
57
58/*
59 * Ceph file operations
60 *
61 * Implement basic open/close functionality, and implement
62 * read/write.
63 *
64 * We implement three modes of file I/O:
65 * - buffered uses the generic_file_aio_{read,write} helpers
66 *
67 * - synchronous is used when there is multi-client read/write
68 * sharing, avoids the page cache, and synchronously waits for an
69 * ack from the OSD.
70 *
71 * - direct io takes the variant of the sync path that references
72 * user pages directly.
73 *
74 * fsync() flushes and waits on dirty pages, but just queues metadata
75 * for writeback: since the MDS can recover size and mtime there is no
76 * need to wait for MDS acknowledgement.
77 */
78
79/*
80 * How many pages to get in one call to iov_iter_get_pages(). This
81 * determines the size of the on-stack array used as a buffer.
82 */
83#define ITER_GET_BVECS_PAGES 64
84
85static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
86 struct bio_vec *bvecs)
87{
88 size_t size = 0;
89 int bvec_idx = 0;
90
91 if (maxsize > iov_iter_count(iter))
92 maxsize = iov_iter_count(iter);
93
94 while (size < maxsize) {
95 struct page *pages[ITER_GET_BVECS_PAGES];
96 ssize_t bytes;
97 size_t start;
98 int idx = 0;
99
100 bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
101 ITER_GET_BVECS_PAGES, &start);
102 if (bytes < 0)
103 return size ?: bytes;
104
105 size += bytes;
106
107 for ( ; bytes; idx++, bvec_idx++) {
108 int len = min_t(int, bytes, PAGE_SIZE - start);
109
110 bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
111 bytes -= len;
112 start = 0;
113 }
114 }
115
116 return size;
117}
118
119/*
120 * iov_iter_get_pages() only considers one iov_iter segment, no matter
121 * what maxsize or maxpages are given. For ITER_BVEC that is a single
122 * page.
123 *
124 * Attempt to get up to @maxsize bytes worth of pages from @iter.
125 * Return the number of bytes in the created bio_vec array, or an error.
126 */
127static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
128 struct bio_vec **bvecs, int *num_bvecs)
129{
130 struct bio_vec *bv;
131 size_t orig_count = iov_iter_count(iter);
132 ssize_t bytes;
133 int npages;
134
135 iov_iter_truncate(iter, maxsize);
136 npages = iov_iter_npages(iter, INT_MAX);
137 iov_iter_reexpand(iter, orig_count);
138
139 /*
140 * __iter_get_bvecs() may populate only part of the array -- zero it
141 * out.
142 */
143 bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
144 if (!bv)
145 return -ENOMEM;
146
147 bytes = __iter_get_bvecs(iter, maxsize, bv);
148 if (bytes < 0) {
149 /*
150 * No pages were pinned -- just free the array.
151 */
152 kvfree(bv);
153 return bytes;
154 }
155
156 *bvecs = bv;
157 *num_bvecs = npages;
158 return bytes;
159}
160
161static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
162{
163 int i;
164
165 for (i = 0; i < num_bvecs; i++) {
166 if (bvecs[i].bv_page) {
167 if (should_dirty)
168 set_page_dirty_lock(bvecs[i].bv_page);
169 put_page(bvecs[i].bv_page);
170 }
171 }
172 kvfree(bvecs);
173}
174
175/*
176 * Prepare an open request. Preallocate ceph_cap to avoid an
177 * inopportune ENOMEM later.
178 */
179static struct ceph_mds_request *
180prepare_open_request(struct super_block *sb, int flags, int create_mode)
181{
182 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
183 struct ceph_mds_request *req;
184 int want_auth = USE_ANY_MDS;
185 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
186
187 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
188 want_auth = USE_AUTH_MDS;
189
190 req = ceph_mdsc_create_request(mdsc, op, want_auth);
191 if (IS_ERR(req))
192 goto out;
193 req->r_fmode = ceph_flags_to_mode(flags);
194 req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags);
195 req->r_args.open.mode = cpu_to_le32(create_mode);
196out:
197 return req;
198}
199
200static int ceph_init_file_info(struct inode *inode, struct file *file,
201 int fmode, bool isdir)
202{
203 struct ceph_inode_info *ci = ceph_inode(inode);
204 struct ceph_mount_options *opt =
205 ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
206 struct ceph_client *cl = ceph_inode_to_client(inode);
207 struct ceph_file_info *fi;
208 int ret;
209
210 doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode),
211 file, inode->i_mode, isdir ? "dir" : "regular");
212 BUG_ON(inode->i_fop->release != ceph_release);
213
214 if (isdir) {
215 struct ceph_dir_file_info *dfi =
216 kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
217 if (!dfi)
218 return -ENOMEM;
219
220 file->private_data = dfi;
221 fi = &dfi->file_info;
222 dfi->next_offset = 2;
223 dfi->readdir_cache_idx = -1;
224 } else {
225 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
226 if (!fi)
227 return -ENOMEM;
228
229 if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
230 fi->flags |= CEPH_F_SYNC;
231
232 file->private_data = fi;
233 }
234
235 ceph_get_fmode(ci, fmode, 1);
236 fi->fmode = fmode;
237
238 spin_lock_init(&fi->rw_contexts_lock);
239 INIT_LIST_HEAD(&fi->rw_contexts);
240 fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
241
242 if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
243 ret = ceph_uninline_data(file);
244 if (ret < 0)
245 goto error;
246 }
247
248 return 0;
249
250error:
251 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
252 ceph_put_fmode(ci, fi->fmode, 1);
253 kmem_cache_free(ceph_file_cachep, fi);
254 /* wake up anyone waiting for caps on this inode */
255 wake_up_all(&ci->i_cap_wq);
256 return ret;
257}
258
259/*
260 * initialize private struct file data.
261 * if we fail, clean up by dropping fmode reference on the ceph_inode
262 */
263static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
264{
265 struct ceph_client *cl = ceph_inode_to_client(inode);
266 int ret = 0;
267
268 switch (inode->i_mode & S_IFMT) {
269 case S_IFREG:
270 ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
271 fallthrough;
272 case S_IFDIR:
273 ret = ceph_init_file_info(inode, file, fmode,
274 S_ISDIR(inode->i_mode));
275 break;
276
277 case S_IFLNK:
278 doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode,
279 ceph_vinop(inode), file, inode->i_mode);
280 break;
281
282 default:
283 doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode,
284 ceph_vinop(inode), file, inode->i_mode);
285 /*
286 * we need to drop the open ref now, since we don't
287 * have .release set to ceph_release.
288 */
289 BUG_ON(inode->i_fop->release == ceph_release);
290
291 /* call the proper open fop */
292 ret = inode->i_fop->open(inode, file);
293 }
294 return ret;
295}
296
297/*
298 * try renew caps after session gets killed.
299 */
300int ceph_renew_caps(struct inode *inode, int fmode)
301{
302 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
303 struct ceph_client *cl = mdsc->fsc->client;
304 struct ceph_inode_info *ci = ceph_inode(inode);
305 struct ceph_mds_request *req;
306 int err, flags, wanted;
307
308 spin_lock(&ci->i_ceph_lock);
309 __ceph_touch_fmode(ci, mdsc, fmode);
310 wanted = __ceph_caps_file_wanted(ci);
311 if (__ceph_is_any_real_caps(ci) &&
312 (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
313 int issued = __ceph_caps_issued(ci, NULL);
314 spin_unlock(&ci->i_ceph_lock);
315 doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n",
316 inode, ceph_vinop(inode), ceph_cap_string(wanted),
317 ceph_cap_string(issued));
318 ceph_check_caps(ci, 0);
319 return 0;
320 }
321 spin_unlock(&ci->i_ceph_lock);
322
323 flags = 0;
324 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
325 flags = O_RDWR;
326 else if (wanted & CEPH_CAP_FILE_RD)
327 flags = O_RDONLY;
328 else if (wanted & CEPH_CAP_FILE_WR)
329 flags = O_WRONLY;
330#ifdef O_LAZY
331 if (wanted & CEPH_CAP_FILE_LAZYIO)
332 flags |= O_LAZY;
333#endif
334
335 req = prepare_open_request(inode->i_sb, flags, 0);
336 if (IS_ERR(req)) {
337 err = PTR_ERR(req);
338 goto out;
339 }
340
341 req->r_inode = inode;
342 ihold(inode);
343 req->r_num_caps = 1;
344
345 err = ceph_mdsc_do_request(mdsc, NULL, req);
346 ceph_mdsc_put_request(req);
347out:
348 doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode),
349 err);
350 return err < 0 ? err : 0;
351}
352
353/*
354 * If we already have the requisite capabilities, we can satisfy
355 * the open request locally (no need to request new caps from the
356 * MDS). We do, however, need to inform the MDS (asynchronously)
357 * if our wanted caps set expands.
358 */
359int ceph_open(struct inode *inode, struct file *file)
360{
361 struct ceph_inode_info *ci = ceph_inode(inode);
362 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
363 struct ceph_client *cl = fsc->client;
364 struct ceph_mds_client *mdsc = fsc->mdsc;
365 struct ceph_mds_request *req;
366 struct ceph_file_info *fi = file->private_data;
367 int err;
368 int flags, fmode, wanted;
369
370 if (fi) {
371 doutc(cl, "file %p is already opened\n", file);
372 return 0;
373 }
374
375 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
376 flags = file->f_flags & ~(O_CREAT|O_EXCL);
377 if (S_ISDIR(inode->i_mode)) {
378 flags = O_DIRECTORY; /* mds likes to know */
379 } else if (S_ISREG(inode->i_mode)) {
380 err = fscrypt_file_open(inode, file);
381 if (err)
382 return err;
383 }
384
385 doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode,
386 ceph_vinop(inode), file, flags, file->f_flags);
387 fmode = ceph_flags_to_mode(flags);
388 wanted = ceph_caps_for_mode(fmode);
389
390 /* snapped files are read-only */
391 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
392 return -EROFS;
393
394 /* trivially open snapdir */
395 if (ceph_snap(inode) == CEPH_SNAPDIR) {
396 return ceph_init_file(inode, file, fmode);
397 }
398
399 /*
400 * No need to block if we have caps on the auth MDS (for
401 * write) or any MDS (for read). Update wanted set
402 * asynchronously.
403 */
404 spin_lock(&ci->i_ceph_lock);
405 if (__ceph_is_any_real_caps(ci) &&
406 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
407 int mds_wanted = __ceph_caps_mds_wanted(ci, true);
408 int issued = __ceph_caps_issued(ci, NULL);
409
410 doutc(cl, "open %p fmode %d want %s issued %s using existing\n",
411 inode, fmode, ceph_cap_string(wanted),
412 ceph_cap_string(issued));
413 __ceph_touch_fmode(ci, mdsc, fmode);
414 spin_unlock(&ci->i_ceph_lock);
415
416 /* adjust wanted? */
417 if ((issued & wanted) != wanted &&
418 (mds_wanted & wanted) != wanted &&
419 ceph_snap(inode) != CEPH_SNAPDIR)
420 ceph_check_caps(ci, 0);
421
422 return ceph_init_file(inode, file, fmode);
423 } else if (ceph_snap(inode) != CEPH_NOSNAP &&
424 (ci->i_snap_caps & wanted) == wanted) {
425 __ceph_touch_fmode(ci, mdsc, fmode);
426 spin_unlock(&ci->i_ceph_lock);
427 return ceph_init_file(inode, file, fmode);
428 }
429
430 spin_unlock(&ci->i_ceph_lock);
431
432 doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
433 req = prepare_open_request(inode->i_sb, flags, 0);
434 if (IS_ERR(req)) {
435 err = PTR_ERR(req);
436 goto out;
437 }
438 req->r_inode = inode;
439 ihold(inode);
440
441 req->r_num_caps = 1;
442 err = ceph_mdsc_do_request(mdsc, NULL, req);
443 if (!err)
444 err = ceph_init_file(inode, file, req->r_fmode);
445 ceph_mdsc_put_request(req);
446 doutc(cl, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
447out:
448 return err;
449}
450
451/* Clone the layout from a synchronous create, if the dir now has Dc caps */
452static void
453cache_file_layout(struct inode *dst, struct inode *src)
454{
455 struct ceph_inode_info *cdst = ceph_inode(dst);
456 struct ceph_inode_info *csrc = ceph_inode(src);
457
458 spin_lock(&cdst->i_ceph_lock);
459 if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
460 !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
461 memcpy(&cdst->i_cached_layout, &csrc->i_layout,
462 sizeof(cdst->i_cached_layout));
463 rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
464 ceph_try_get_string(csrc->i_layout.pool_ns));
465 }
466 spin_unlock(&cdst->i_ceph_lock);
467}
468
469/*
470 * Try to set up an async create. We need caps, a file layout, and inode number,
471 * and either a lease on the dentry or complete dir info. If any of those
472 * criteria are not satisfied, then return false and the caller can go
473 * synchronous.
474 */
475static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
476 struct ceph_file_layout *lo, u64 *pino)
477{
478 struct ceph_inode_info *ci = ceph_inode(dir);
479 struct ceph_dentry_info *di = ceph_dentry(dentry);
480 int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
481 u64 ino;
482
483 spin_lock(&ci->i_ceph_lock);
484 /* No auth cap means no chance for Dc caps */
485 if (!ci->i_auth_cap)
486 goto no_async;
487
488 /* Any delegated inos? */
489 if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
490 goto no_async;
491
492 if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
493 goto no_async;
494
495 if ((__ceph_caps_issued(ci, NULL) & want) != want)
496 goto no_async;
497
498 if (d_in_lookup(dentry)) {
499 if (!__ceph_dir_is_complete(ci))
500 goto no_async;
501 spin_lock(&dentry->d_lock);
502 di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
503 spin_unlock(&dentry->d_lock);
504 } else if (atomic_read(&ci->i_shared_gen) !=
505 READ_ONCE(di->lease_shared_gen)) {
506 goto no_async;
507 }
508
509 ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
510 if (!ino)
511 goto no_async;
512
513 *pino = ino;
514 ceph_take_cap_refs(ci, want, false);
515 memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
516 rcu_assign_pointer(lo->pool_ns,
517 ceph_try_get_string(ci->i_cached_layout.pool_ns));
518 got = want;
519no_async:
520 spin_unlock(&ci->i_ceph_lock);
521 return got;
522}
523
524static void restore_deleg_ino(struct inode *dir, u64 ino)
525{
526 struct ceph_client *cl = ceph_inode_to_client(dir);
527 struct ceph_inode_info *ci = ceph_inode(dir);
528 struct ceph_mds_session *s = NULL;
529
530 spin_lock(&ci->i_ceph_lock);
531 if (ci->i_auth_cap)
532 s = ceph_get_mds_session(ci->i_auth_cap->session);
533 spin_unlock(&ci->i_ceph_lock);
534 if (s) {
535 int err = ceph_restore_deleg_ino(s, ino);
536 if (err)
537 pr_warn_client(cl,
538 "unable to restore delegated ino 0x%llx to session: %d\n",
539 ino, err);
540 ceph_put_mds_session(s);
541 }
542}
543
544static void wake_async_create_waiters(struct inode *inode,
545 struct ceph_mds_session *session)
546{
547 struct ceph_inode_info *ci = ceph_inode(inode);
548 bool check_cap = false;
549
550 spin_lock(&ci->i_ceph_lock);
551 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
552 ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
553 wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
554
555 if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
556 ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
557 check_cap = true;
558 }
559 }
560 ceph_kick_flushing_inode_caps(session, ci);
561 spin_unlock(&ci->i_ceph_lock);
562
563 if (check_cap)
564 ceph_check_caps(ci, CHECK_CAPS_FLUSH);
565}
566
567static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
568 struct ceph_mds_request *req)
569{
570 struct ceph_client *cl = mdsc->fsc->client;
571 struct dentry *dentry = req->r_dentry;
572 struct inode *dinode = d_inode(dentry);
573 struct inode *tinode = req->r_target_inode;
574 int result = req->r_err ? req->r_err :
575 le32_to_cpu(req->r_reply_info.head->result);
576
577 WARN_ON_ONCE(dinode && tinode && dinode != tinode);
578
579 /* MDS changed -- caller must resubmit */
580 if (result == -EJUKEBOX)
581 goto out;
582
583 mapping_set_error(req->r_parent->i_mapping, result);
584
585 if (result) {
586 int pathlen = 0;
587 u64 base = 0;
588 char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
589 &base, 0);
590
591 pr_warn_client(cl,
592 "async create failure path=(%llx)%s result=%d!\n",
593 base, IS_ERR(path) ? "<<bad>>" : path, result);
594 ceph_mdsc_free_path(path, pathlen);
595
596 ceph_dir_clear_complete(req->r_parent);
597 if (!d_unhashed(dentry))
598 d_drop(dentry);
599
600 if (dinode) {
601 mapping_set_error(dinode->i_mapping, result);
602 ceph_inode_shutdown(dinode);
603 wake_async_create_waiters(dinode, req->r_session);
604 }
605 }
606
607 if (tinode) {
608 u64 ino = ceph_vino(tinode).ino;
609
610 if (req->r_deleg_ino != ino)
611 pr_warn_client(cl,
612 "inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
613 req->r_err, req->r_deleg_ino, ino);
614
615 mapping_set_error(tinode->i_mapping, result);
616 wake_async_create_waiters(tinode, req->r_session);
617 } else if (!result) {
618 pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n",
619 req->r_deleg_ino);
620 }
621out:
622 ceph_mdsc_release_dir_caps(req);
623}
624
625static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
626 struct dentry *dentry,
627 struct file *file, umode_t mode,
628 struct ceph_mds_request *req,
629 struct ceph_acl_sec_ctx *as_ctx,
630 struct ceph_file_layout *lo)
631{
632 int ret;
633 char xattr_buf[4];
634 struct ceph_mds_reply_inode in = { };
635 struct ceph_mds_reply_info_in iinfo = { .in = &in };
636 struct ceph_inode_info *ci = ceph_inode(dir);
637 struct ceph_dentry_info *di = ceph_dentry(dentry);
638 struct timespec64 now;
639 struct ceph_string *pool_ns;
640 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
641 struct ceph_client *cl = mdsc->fsc->client;
642 struct ceph_vino vino = { .ino = req->r_deleg_ino,
643 .snap = CEPH_NOSNAP };
644
645 ktime_get_real_ts64(&now);
646
647 iinfo.inline_version = CEPH_INLINE_NONE;
648 iinfo.change_attr = 1;
649 ceph_encode_timespec64(&iinfo.btime, &now);
650
651 if (req->r_pagelist) {
652 iinfo.xattr_len = req->r_pagelist->length;
653 iinfo.xattr_data = req->r_pagelist->mapped_tail;
654 } else {
655 /* fake it */
656 iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
657 iinfo.xattr_data = xattr_buf;
658 memset(iinfo.xattr_data, 0, iinfo.xattr_len);
659 }
660
661 in.ino = cpu_to_le64(vino.ino);
662 in.snapid = cpu_to_le64(CEPH_NOSNAP);
663 in.version = cpu_to_le64(1); // ???
664 in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
665 in.cap.cap_id = cpu_to_le64(1);
666 in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
667 in.cap.flags = CEPH_CAP_FLAG_AUTH;
668 in.ctime = in.mtime = in.atime = iinfo.btime;
669 in.truncate_seq = cpu_to_le32(1);
670 in.truncate_size = cpu_to_le64(-1ULL);
671 in.xattr_version = cpu_to_le64(1);
672 in.uid = cpu_to_le32(from_kuid(&init_user_ns,
673 mapped_fsuid(req->r_mnt_idmap,
674 &init_user_ns)));
675 if (dir->i_mode & S_ISGID) {
676 in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
677
678 /* Directories always inherit the setgid bit. */
679 if (S_ISDIR(mode))
680 mode |= S_ISGID;
681 } else {
682 in.gid = cpu_to_le32(from_kgid(&init_user_ns,
683 mapped_fsgid(req->r_mnt_idmap,
684 &init_user_ns)));
685 }
686 in.mode = cpu_to_le32((u32)mode);
687
688 in.nlink = cpu_to_le32(1);
689 in.max_size = cpu_to_le64(lo->stripe_unit);
690
691 ceph_file_layout_to_legacy(lo, &in.layout);
692 /* lo is private, so pool_ns can't change */
693 pool_ns = rcu_dereference_raw(lo->pool_ns);
694 if (pool_ns) {
695 iinfo.pool_ns_len = pool_ns->len;
696 iinfo.pool_ns_data = pool_ns->str;
697 }
698
699 down_read(&mdsc->snap_rwsem);
700 ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
701 req->r_fmode, NULL);
702 up_read(&mdsc->snap_rwsem);
703 if (ret) {
704 doutc(cl, "failed to fill inode: %d\n", ret);
705 ceph_dir_clear_complete(dir);
706 if (!d_unhashed(dentry))
707 d_drop(dentry);
708 discard_new_inode(inode);
709 } else {
710 struct dentry *dn;
711
712 doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n",
713 vino.ino, ceph_ino(dir), dentry->d_name.name);
714 ceph_dir_clear_ordered(dir);
715 ceph_init_inode_acls(inode, as_ctx);
716 if (inode->i_state & I_NEW) {
717 /*
718 * If it's not I_NEW, then someone created this before
719 * we got here. Assume the server is aware of it at
720 * that point and don't worry about setting
721 * CEPH_I_ASYNC_CREATE.
722 */
723 ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
724 unlock_new_inode(inode);
725 }
726 if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
727 if (!d_unhashed(dentry))
728 d_drop(dentry);
729 dn = d_splice_alias(inode, dentry);
730 WARN_ON_ONCE(dn && dn != dentry);
731 }
732 file->f_mode |= FMODE_CREATED;
733 ret = finish_open(file, dentry, ceph_open);
734 }
735
736 spin_lock(&dentry->d_lock);
737 di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
738 wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
739 spin_unlock(&dentry->d_lock);
740
741 return ret;
742}
743
744/*
745 * Do a lookup + open with a single request. If we get a non-existent
746 * file or symlink, return 1 so the VFS can retry.
747 */
748int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
749 struct file *file, unsigned flags, umode_t mode)
750{
751 struct mnt_idmap *idmap = file_mnt_idmap(file);
752 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
753 struct ceph_client *cl = fsc->client;
754 struct ceph_mds_client *mdsc = fsc->mdsc;
755 struct ceph_mds_request *req;
756 struct inode *new_inode = NULL;
757 struct dentry *dn;
758 struct ceph_acl_sec_ctx as_ctx = {};
759 bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
760 int mask;
761 int err;
762
763 doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
764 dir, ceph_vinop(dir), dentry, dentry,
765 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
766
767 if (dentry->d_name.len > NAME_MAX)
768 return -ENAMETOOLONG;
769
770 err = ceph_wait_on_conflict_unlink(dentry);
771 if (err)
772 return err;
773 /*
774 * Do not truncate the file, since atomic_open is called before the
775 * permission check. The caller will do the truncation afterward.
776 */
777 flags &= ~O_TRUNC;
778
779retry:
780 if (flags & O_CREAT) {
781 if (ceph_quota_is_max_files_exceeded(dir))
782 return -EDQUOT;
783
784 new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
785 if (IS_ERR(new_inode)) {
786 err = PTR_ERR(new_inode);
787 goto out_ctx;
788 }
789 /* Async create can't handle more than a page of xattrs */
790 if (as_ctx.pagelist &&
791 !list_is_singular(&as_ctx.pagelist->head))
792 try_async = false;
793 } else if (!d_in_lookup(dentry)) {
794 /* If it's not being looked up, it's negative */
795 return -ENOENT;
796 }
797
798 /* do the open */
799 req = prepare_open_request(dir->i_sb, flags, mode);
800 if (IS_ERR(req)) {
801 err = PTR_ERR(req);
802 goto out_ctx;
803 }
804 req->r_dentry = dget(dentry);
805 req->r_num_caps = 2;
806 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
807 if (ceph_security_xattr_wanted(dir))
808 mask |= CEPH_CAP_XATTR_SHARED;
809 req->r_args.open.mask = cpu_to_le32(mask);
810 req->r_parent = dir;
811 if (req->r_op == CEPH_MDS_OP_CREATE)
812 req->r_mnt_idmap = mnt_idmap_get(idmap);
813 ihold(dir);
814 if (IS_ENCRYPTED(dir)) {
815 set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
816 err = fscrypt_prepare_lookup_partial(dir, dentry);
817 if (err < 0)
818 goto out_req;
819 }
820
821 if (flags & O_CREAT) {
822 struct ceph_file_layout lo;
823
824 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
825 CEPH_CAP_XATTR_EXCL;
826 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
827
828 ceph_as_ctx_to_req(req, &as_ctx);
829
830 if (try_async && (req->r_dir_caps =
831 try_prep_async_create(dir, dentry, &lo,
832 &req->r_deleg_ino))) {
833 struct ceph_vino vino = { .ino = req->r_deleg_ino,
834 .snap = CEPH_NOSNAP };
835 struct ceph_dentry_info *di = ceph_dentry(dentry);
836
837 set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
838 req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
839 req->r_callback = ceph_async_create_cb;
840
841 /* Hash inode before RPC */
842 new_inode = ceph_get_inode(dir->i_sb, vino, new_inode);
843 if (IS_ERR(new_inode)) {
844 err = PTR_ERR(new_inode);
845 new_inode = NULL;
846 goto out_req;
847 }
848 WARN_ON_ONCE(!(new_inode->i_state & I_NEW));
849
850 spin_lock(&dentry->d_lock);
851 di->flags |= CEPH_DENTRY_ASYNC_CREATE;
852 spin_unlock(&dentry->d_lock);
853
854 err = ceph_mdsc_submit_request(mdsc, dir, req);
855 if (!err) {
856 err = ceph_finish_async_create(dir, new_inode,
857 dentry, file,
858 mode, req,
859 &as_ctx, &lo);
860 new_inode = NULL;
861 } else if (err == -EJUKEBOX) {
862 restore_deleg_ino(dir, req->r_deleg_ino);
863 ceph_mdsc_put_request(req);
864 discard_new_inode(new_inode);
865 ceph_release_acl_sec_ctx(&as_ctx);
866 memset(&as_ctx, 0, sizeof(as_ctx));
867 new_inode = NULL;
868 try_async = false;
869 ceph_put_string(rcu_dereference_raw(lo.pool_ns));
870 goto retry;
871 }
872 ceph_put_string(rcu_dereference_raw(lo.pool_ns));
873 goto out_req;
874 }
875 }
876
877 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
878 req->r_new_inode = new_inode;
879 new_inode = NULL;
880 err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
881 if (err == -ENOENT) {
882 dentry = ceph_handle_snapdir(req, dentry);
883 if (IS_ERR(dentry)) {
884 err = PTR_ERR(dentry);
885 goto out_req;
886 }
887 err = 0;
888 }
889
890 if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
891 err = ceph_handle_notrace_create(dir, dentry);
892
893 if (d_in_lookup(dentry)) {
894 dn = ceph_finish_lookup(req, dentry, err);
895 if (IS_ERR(dn))
896 err = PTR_ERR(dn);
897 } else {
898 /* we were given a hashed negative dentry */
899 dn = NULL;
900 }
901 if (err)
902 goto out_req;
903 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
904 /* make vfs retry on splice, ENOENT, or symlink */
905 doutc(cl, "finish_no_open on dn %p\n", dn);
906 err = finish_no_open(file, dn);
907 } else {
908 if (IS_ENCRYPTED(dir) &&
909 !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
910 pr_warn_client(cl,
911 "Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
912 ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
913 goto out_req;
914 }
915
916 doutc(cl, "finish_open on dn %p\n", dn);
917 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
918 struct inode *newino = d_inode(dentry);
919
920 cache_file_layout(dir, newino);
921 ceph_init_inode_acls(newino, &as_ctx);
922 file->f_mode |= FMODE_CREATED;
923 }
924 err = finish_open(file, dentry, ceph_open);
925 }
926out_req:
927 ceph_mdsc_put_request(req);
928 iput(new_inode);
929out_ctx:
930 ceph_release_acl_sec_ctx(&as_ctx);
931 doutc(cl, "result=%d\n", err);
932 return err;
933}
934
935int ceph_release(struct inode *inode, struct file *file)
936{
937 struct ceph_client *cl = ceph_inode_to_client(inode);
938 struct ceph_inode_info *ci = ceph_inode(inode);
939
940 if (S_ISDIR(inode->i_mode)) {
941 struct ceph_dir_file_info *dfi = file->private_data;
942 doutc(cl, "%p %llx.%llx dir file %p\n", inode,
943 ceph_vinop(inode), file);
944 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
945
946 ceph_put_fmode(ci, dfi->file_info.fmode, 1);
947
948 if (dfi->last_readdir)
949 ceph_mdsc_put_request(dfi->last_readdir);
950 kfree(dfi->last_name);
951 kfree(dfi->dir_info);
952 kmem_cache_free(ceph_dir_file_cachep, dfi);
953 } else {
954 struct ceph_file_info *fi = file->private_data;
955 doutc(cl, "%p %llx.%llx regular file %p\n", inode,
956 ceph_vinop(inode), file);
957 WARN_ON(!list_empty(&fi->rw_contexts));
958
959 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
960 ceph_put_fmode(ci, fi->fmode, 1);
961
962 kmem_cache_free(ceph_file_cachep, fi);
963 }
964
965 /* wake up anyone waiting for caps on this inode */
966 wake_up_all(&ci->i_cap_wq);
967 return 0;
968}
969
970enum {
971 HAVE_RETRIED = 1,
972 CHECK_EOF = 2,
973 READ_INLINE = 3,
974};
975
976/*
977 * Completely synchronous read and write methods. Direct from __user
978 * buffer to osd, or directly to user pages (if O_DIRECT).
979 *
980 * If the read spans object boundary, just do multiple reads. (That's not
981 * atomic, but good enough for now.)
982 *
983 * If we get a short result from the OSD, check against i_size; we need to
984 * only return a short read to the caller if we hit EOF.
985 */
986ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
987 struct iov_iter *to, int *retry_op,
988 u64 *last_objver)
989{
990 struct ceph_inode_info *ci = ceph_inode(inode);
991 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
992 struct ceph_client *cl = fsc->client;
993 struct ceph_osd_client *osdc = &fsc->client->osdc;
994 ssize_t ret;
995 u64 off = *ki_pos;
996 u64 len = iov_iter_count(to);
997 u64 i_size = i_size_read(inode);
998 bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
999 u64 objver = 0;
1000
1001 doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode,
1002 ceph_vinop(inode), *ki_pos, len);
1003
1004 if (ceph_inode_is_shutdown(inode))
1005 return -EIO;
1006
1007 if (!len)
1008 return 0;
1009 /*
1010 * flush any page cache pages in this range. this
1011 * will make concurrent normal and sync io slow,
1012 * but it will at least behave sensibly when they are
1013 * in sequence.
1014 */
1015 ret = filemap_write_and_wait_range(inode->i_mapping,
1016 off, off + len - 1);
1017 if (ret < 0)
1018 return ret;
1019
1020 ret = 0;
1021 while ((len = iov_iter_count(to)) > 0) {
1022 struct ceph_osd_request *req;
1023 struct page **pages;
1024 int num_pages;
1025 size_t page_off;
1026 bool more;
1027 int idx;
1028 size_t left;
1029 struct ceph_osd_req_op *op;
1030 u64 read_off = off;
1031 u64 read_len = len;
1032 int extent_cnt;
1033
1034 /* determine new offset/length if encrypted */
1035 ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
1036
1037 doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len,
1038 read_off, read_len);
1039
1040 req = ceph_osdc_new_request(osdc, &ci->i_layout,
1041 ci->i_vino, read_off, &read_len, 0, 1,
1042 sparse ? CEPH_OSD_OP_SPARSE_READ :
1043 CEPH_OSD_OP_READ,
1044 CEPH_OSD_FLAG_READ,
1045 NULL, ci->i_truncate_seq,
1046 ci->i_truncate_size, false);
1047 if (IS_ERR(req)) {
1048 ret = PTR_ERR(req);
1049 break;
1050 }
1051
1052 /* adjust len downward if the request truncated the len */
1053 if (off + len > read_off + read_len)
1054 len = read_off + read_len - off;
1055 more = len < iov_iter_count(to);
1056
1057 num_pages = calc_pages_for(read_off, read_len);
1058 page_off = offset_in_page(off);
1059 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1060 if (IS_ERR(pages)) {
1061 ceph_osdc_put_request(req);
1062 ret = PTR_ERR(pages);
1063 break;
1064 }
1065
1066 osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
1067 offset_in_page(read_off),
1068 false, false);
1069
1070 op = &req->r_ops[0];
1071 if (sparse) {
1072 extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
1073 ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
1074 if (ret) {
1075 ceph_osdc_put_request(req);
1076 break;
1077 }
1078 }
1079
1080 ceph_osdc_start_request(osdc, req);
1081 ret = ceph_osdc_wait_request(osdc, req);
1082
1083 ceph_update_read_metrics(&fsc->mdsc->metric,
1084 req->r_start_latency,
1085 req->r_end_latency,
1086 read_len, ret);
1087
1088 if (ret > 0)
1089 objver = req->r_version;
1090
1091 i_size = i_size_read(inode);
1092 doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len,
1093 ret, i_size, (more ? " MORE" : ""));
1094
1095 /* Fix it to go to end of extent map */
1096 if (sparse && ret >= 0)
1097 ret = ceph_sparse_ext_map_end(op);
1098 else if (ret == -ENOENT)
1099 ret = 0;
1100
1101 if (ret > 0 && IS_ENCRYPTED(inode)) {
1102 int fret;
1103
1104 fret = ceph_fscrypt_decrypt_extents(inode, pages,
1105 read_off, op->extent.sparse_ext,
1106 op->extent.sparse_ext_cnt);
1107 if (fret < 0) {
1108 ret = fret;
1109 ceph_osdc_put_request(req);
1110 break;
1111 }
1112
1113 /* account for any partial block at the beginning */
1114 fret -= (off - read_off);
1115
1116 /*
1117 * Short read after big offset adjustment?
1118 * Nothing is usable, just call it a zero
1119 * len read.
1120 */
1121 fret = max(fret, 0);
1122
1123 /* account for partial block at the end */
1124 ret = min_t(ssize_t, fret, len);
1125 }
1126
1127 ceph_osdc_put_request(req);
1128
1129 /* Short read but not EOF? Zero out the remainder. */
1130 if (ret >= 0 && ret < len && (off + ret < i_size)) {
1131 int zlen = min(len - ret, i_size - off - ret);
1132 int zoff = page_off + ret;
1133
1134 doutc(cl, "zero gap %llu~%llu\n", off + ret,
1135 off + ret + zlen);
1136 ceph_zero_page_vector_range(zoff, zlen, pages);
1137 ret += zlen;
1138 }
1139
1140 idx = 0;
1141 left = ret > 0 ? ret : 0;
1142 while (left > 0) {
1143 size_t plen, copied;
1144
1145 plen = min_t(size_t, left, PAGE_SIZE - page_off);
1146 SetPageUptodate(pages[idx]);
1147 copied = copy_page_to_iter(pages[idx++],
1148 page_off, plen, to);
1149 off += copied;
1150 left -= copied;
1151 page_off = 0;
1152 if (copied < plen) {
1153 ret = -EFAULT;
1154 break;
1155 }
1156 }
1157 ceph_release_page_vector(pages, num_pages);
1158
1159 if (ret < 0) {
1160 if (ret == -EBLOCKLISTED)
1161 fsc->blocklisted = true;
1162 break;
1163 }
1164
1165 if (off >= i_size || !more)
1166 break;
1167 }
1168
1169 if (ret > 0) {
1170 if (off > *ki_pos) {
1171 if (off >= i_size) {
1172 *retry_op = CHECK_EOF;
1173 ret = i_size - *ki_pos;
1174 *ki_pos = i_size;
1175 } else {
1176 ret = off - *ki_pos;
1177 *ki_pos = off;
1178 }
1179 }
1180
1181 if (last_objver)
1182 *last_objver = objver;
1183 }
1184 doutc(cl, "result %zd retry_op %d\n", ret, *retry_op);
1185 return ret;
1186}
1187
1188static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
1189 int *retry_op)
1190{
1191 struct file *file = iocb->ki_filp;
1192 struct inode *inode = file_inode(file);
1193 struct ceph_client *cl = ceph_inode_to_client(inode);
1194
1195 doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos,
1196 iov_iter_count(to),
1197 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
1198
1199 return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
1200}
1201
1202struct ceph_aio_request {
1203 struct kiocb *iocb;
1204 size_t total_len;
1205 bool write;
1206 bool should_dirty;
1207 int error;
1208 struct list_head osd_reqs;
1209 unsigned num_reqs;
1210 atomic_t pending_reqs;
1211 struct timespec64 mtime;
1212 struct ceph_cap_flush *prealloc_cf;
1213};
1214
1215struct ceph_aio_work {
1216 struct work_struct work;
1217 struct ceph_osd_request *req;
1218};
1219
1220static void ceph_aio_retry_work(struct work_struct *work);
1221
1222static void ceph_aio_complete(struct inode *inode,
1223 struct ceph_aio_request *aio_req)
1224{
1225 struct ceph_client *cl = ceph_inode_to_client(inode);
1226 struct ceph_inode_info *ci = ceph_inode(inode);
1227 int ret;
1228
1229 if (!atomic_dec_and_test(&aio_req->pending_reqs))
1230 return;
1231
1232 if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1233 inode_dio_end(inode);
1234
1235 ret = aio_req->error;
1236 if (!ret)
1237 ret = aio_req->total_len;
1238
1239 doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret);
1240
1241 if (ret >= 0 && aio_req->write) {
1242 int dirty;
1243
1244 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1245 if (endoff > i_size_read(inode)) {
1246 if (ceph_inode_set_size(inode, endoff))
1247 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
1248 }
1249
1250 spin_lock(&ci->i_ceph_lock);
1251 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1252 &aio_req->prealloc_cf);
1253 spin_unlock(&ci->i_ceph_lock);
1254 if (dirty)
1255 __mark_inode_dirty(inode, dirty);
1256
1257 }
1258
1259 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1260 CEPH_CAP_FILE_RD));
1261
1262 aio_req->iocb->ki_complete(aio_req->iocb, ret);
1263
1264 ceph_free_cap_flush(aio_req->prealloc_cf);
1265 kfree(aio_req);
1266}
1267
1268static void ceph_aio_complete_req(struct ceph_osd_request *req)
1269{
1270 int rc = req->r_result;
1271 struct inode *inode = req->r_inode;
1272 struct ceph_aio_request *aio_req = req->r_priv;
1273 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1274 struct ceph_osd_req_op *op = &req->r_ops[0];
1275 struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
1276 unsigned int len = osd_data->bvec_pos.iter.bi_size;
1277 bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
1278 struct ceph_client *cl = ceph_inode_to_client(inode);
1279
1280 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1281 BUG_ON(!osd_data->num_bvecs);
1282
1283 doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req,
1284 inode, ceph_vinop(inode), rc, len);
1285
1286 if (rc == -EOLDSNAPC) {
1287 struct ceph_aio_work *aio_work;
1288 BUG_ON(!aio_req->write);
1289
1290 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1291 if (aio_work) {
1292 INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1293 aio_work->req = req;
1294 queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
1295 &aio_work->work);
1296 return;
1297 }
1298 rc = -ENOMEM;
1299 } else if (!aio_req->write) {
1300 if (sparse && rc >= 0)
1301 rc = ceph_sparse_ext_map_end(op);
1302 if (rc == -ENOENT)
1303 rc = 0;
1304 if (rc >= 0 && len > rc) {
1305 struct iov_iter i;
1306 int zlen = len - rc;
1307
1308			/*
1309			 * If the read was satisfied by a single OSD
1310			 * request, it may extend past EOF. Otherwise the
1311			 * read is within i_size.
1312			 */
1313 if (aio_req->num_reqs == 1) {
1314 loff_t i_size = i_size_read(inode);
1315 loff_t endoff = aio_req->iocb->ki_pos + rc;
1316 if (endoff < i_size)
1317 zlen = min_t(size_t, zlen,
1318 i_size - endoff);
1319 aio_req->total_len = rc + zlen;
1320 }
1321
1322 iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
1323 osd_data->num_bvecs, len);
1324 iov_iter_advance(&i, rc);
1325 iov_iter_zero(zlen, &i);
1326 }
1327 }
1328
1329 /* r_start_latency == 0 means the request was not submitted */
1330 if (req->r_start_latency) {
1331 if (aio_req->write)
1332 ceph_update_write_metrics(metric, req->r_start_latency,
1333 req->r_end_latency, len, rc);
1334 else
1335 ceph_update_read_metrics(metric, req->r_start_latency,
1336 req->r_end_latency, len, rc);
1337 }
1338
1339 put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1340 aio_req->should_dirty);
1341 ceph_osdc_put_request(req);
1342
1343 if (rc < 0)
1344 cmpxchg(&aio_req->error, 0, rc);
1345
1346 ceph_aio_complete(inode, aio_req);
1347 return;
1348}
1349
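/*
 * Redrive a write that raced with a snapshot: reallocate the OSD
 * request against the most recent snap context and resubmit it. Runs
 * from the inode workqueue, so blocking allocations are fine.
 */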
1350static void ceph_aio_retry_work(struct work_struct *work)
1351{
1352 struct ceph_aio_work *aio_work =
1353 container_of(work, struct ceph_aio_work, work);
1354 struct ceph_osd_request *orig_req = aio_work->req;
1355 struct ceph_aio_request *aio_req = orig_req->r_priv;
1356 struct inode *inode = orig_req->r_inode;
1357 struct ceph_inode_info *ci = ceph_inode(inode);
1358 struct ceph_snap_context *snapc;
1359 struct ceph_osd_request *req;
1360 int ret;
1361
1362 spin_lock(&ci->i_ceph_lock);
1363 if (__ceph_have_pending_cap_snap(ci)) {
1364 struct ceph_cap_snap *capsnap =
1365 list_last_entry(&ci->i_cap_snaps,
1366 struct ceph_cap_snap,
1367 ci_item);
1368 snapc = ceph_get_snap_context(capsnap->context);
1369 } else {
1370 BUG_ON(!ci->i_head_snapc);
1371 snapc = ceph_get_snap_context(ci->i_head_snapc);
1372 }
1373 spin_unlock(&ci->i_ceph_lock);
1374
1375 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1376 false, GFP_NOFS);
1377 if (!req) {
1378 ret = -ENOMEM;
1379 req = orig_req;
1380 goto out;
1381 }
1382
1383 req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1384 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1385 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1386
1387 req->r_ops[0] = orig_req->r_ops[0];
1388
1389 req->r_mtime = aio_req->mtime;
1390 req->r_data_offset = req->r_ops[0].extent.offset;
1391
1392 ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1393 if (ret) {
1394 ceph_osdc_put_request(req);
1395 req = orig_req;
1396 goto out;
1397 }
1398
1399 ceph_osdc_put_request(orig_req);
1400
1401 req->r_callback = ceph_aio_complete_req;
1402 req->r_inode = inode;
1403 req->r_priv = aio_req;
1404
1405 ceph_osdc_start_request(req->r_osdc, req);
1406out:
1407 if (ret < 0) {
1408 req->r_result = ret;
1409 ceph_aio_complete_req(req);
1410 }
1411
1412 ceph_put_snap_context(snapc);
1413 kfree(aio_work);
1414}
1415
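/*
 * Issue O_DIRECT I/O as one OSD request per object-sized chunk
 * (bounded by the rsize/wsize mount options), with the user pages
 * pinned as bio_vecs. For an async iocb the requests are gathered on
 * a ceph_aio_request and submitted together, returning -EIOCBQUEUED;
 * synchronous callers wait for each request in turn.
 */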
1416static ssize_t
1417ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1418 struct ceph_snap_context *snapc,
1419 struct ceph_cap_flush **pcf)
1420{
1421 struct file *file = iocb->ki_filp;
1422 struct inode *inode = file_inode(file);
1423 struct ceph_inode_info *ci = ceph_inode(inode);
1424 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
1425 struct ceph_client *cl = fsc->client;
1426 struct ceph_client_metric *metric = &fsc->mdsc->metric;
1427 struct ceph_vino vino;
1428 struct ceph_osd_request *req;
1429 struct bio_vec *bvecs;
1430 struct ceph_aio_request *aio_req = NULL;
1431 int num_pages = 0;
1432 int flags;
1433 int ret = 0;
1434 struct timespec64 mtime = current_time(inode);
1435 size_t count = iov_iter_count(iter);
1436 loff_t pos = iocb->ki_pos;
1437 bool write = iov_iter_rw(iter) == WRITE;
1438 bool should_dirty = !write && user_backed_iter(iter);
1439 bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
1440
1441 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1442 return -EROFS;
1443
1444 doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1445 (write ? "write" : "read"), file, pos, (unsigned)count,
1446 snapc, snapc ? snapc->seq : 0);
1447
1448 if (write) {
1449 int ret2;
1450
1451 ceph_fscache_invalidate(inode, true);
1452
1453 ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1454 pos >> PAGE_SHIFT,
1455 (pos + count - 1) >> PAGE_SHIFT);
1456 if (ret2 < 0)
1457 doutc(cl, "invalidate_inode_pages2_range returned %d\n",
1458 ret2);
1459
1460 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1461 } else {
1462 flags = CEPH_OSD_FLAG_READ;
1463 }
1464
1465 while (iov_iter_count(iter) > 0) {
1466 u64 size = iov_iter_count(iter);
1467 ssize_t len;
1468 struct ceph_osd_req_op *op;
1469 int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
1470 int extent_cnt;
1471
1472 if (write)
1473 size = min_t(u64, size, fsc->mount_options->wsize);
1474 else
1475 size = min_t(u64, size, fsc->mount_options->rsize);
1476
1477 vino = ceph_vino(inode);
1478 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1479 vino, pos, &size, 0,
1480 1,
1481 write ? CEPH_OSD_OP_WRITE : readop,
1482 flags, snapc,
1483 ci->i_truncate_seq,
1484 ci->i_truncate_size,
1485 false);
1486 if (IS_ERR(req)) {
1487 ret = PTR_ERR(req);
1488 break;
1489 }
1490
1491 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1492 if (len < 0) {
1493 ceph_osdc_put_request(req);
1494 ret = len;
1495 break;
1496 }
1497 if (len != size)
1498 osd_req_op_extent_update(req, 0, len);
1499
1500		/*
1501		 * To simplify error handling, allow AIO only when the I/O is
1502		 * within i_size or can be satisfied by a single OSD request.
1503		 */
1504 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1505 (len == count || pos + count <= i_size_read(inode))) {
1506 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1507 if (aio_req) {
1508 aio_req->iocb = iocb;
1509 aio_req->write = write;
1510 aio_req->should_dirty = should_dirty;
1511 INIT_LIST_HEAD(&aio_req->osd_reqs);
1512 if (write) {
1513 aio_req->mtime = mtime;
1514 swap(aio_req->prealloc_cf, *pcf);
1515 }
1516 }
1517			/* if the allocation failed, fall back to sync I/O */
1518 }
1519
1520 if (write) {
1521			/*
1522			 * Throw out any page cache pages in this range;
1523			 * this may block.
1524			 */
1525 truncate_inode_pages_range(inode->i_mapping, pos,
1526 PAGE_ALIGN(pos + len) - 1);
1527
1528 req->r_mtime = mtime;
1529 }
1530
1531 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1532 op = &req->r_ops[0];
1533 if (sparse) {
1534 extent_cnt = __ceph_sparse_read_ext_count(inode, size);
1535 ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
1536 if (ret) {
1537 ceph_osdc_put_request(req);
1538 break;
1539 }
1540 }
1541
1542 if (aio_req) {
1543 aio_req->total_len += len;
1544 aio_req->num_reqs++;
1545 atomic_inc(&aio_req->pending_reqs);
1546
1547 req->r_callback = ceph_aio_complete_req;
1548 req->r_inode = inode;
1549 req->r_priv = aio_req;
1550 list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1551
1552 pos += len;
1553 continue;
1554 }
1555
1556 ceph_osdc_start_request(req->r_osdc, req);
1557 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1558
1559 if (write)
1560 ceph_update_write_metrics(metric, req->r_start_latency,
1561 req->r_end_latency, len, ret);
1562 else
1563 ceph_update_read_metrics(metric, req->r_start_latency,
1564 req->r_end_latency, len, ret);
1565
1566 size = i_size_read(inode);
1567 if (!write) {
1568 if (sparse && ret >= 0)
1569 ret = ceph_sparse_ext_map_end(op);
1570 else if (ret == -ENOENT)
1571 ret = 0;
1572
1573 if (ret >= 0 && ret < len && pos + ret < size) {
1574 struct iov_iter i;
1575 int zlen = min_t(size_t, len - ret,
1576 size - pos - ret);
1577
1578 iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1579 iov_iter_advance(&i, ret);
1580 iov_iter_zero(zlen, &i);
1581 ret += zlen;
1582 }
1583 if (ret >= 0)
1584 len = ret;
1585 }
1586
1587 put_bvecs(bvecs, num_pages, should_dirty);
1588 ceph_osdc_put_request(req);
1589 if (ret < 0)
1590 break;
1591
1592 pos += len;
1593 if (!write && pos >= size)
1594 break;
1595
1596 if (write && pos > size) {
1597 if (ceph_inode_set_size(inode, pos))
1598 ceph_check_caps(ceph_inode(inode),
1599 CHECK_CAPS_AUTHONLY);
1600 }
1601 }
1602
1603 if (aio_req) {
1604 LIST_HEAD(osd_reqs);
1605
1606 if (aio_req->num_reqs == 0) {
1607 kfree(aio_req);
1608 return ret;
1609 }
1610
1611 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1612 CEPH_CAP_FILE_RD);
1613
1614 list_splice(&aio_req->osd_reqs, &osd_reqs);
1615 inode_dio_begin(inode);
1616 while (!list_empty(&osd_reqs)) {
1617 req = list_first_entry(&osd_reqs,
1618 struct ceph_osd_request,
1619 r_private_item);
1620 list_del_init(&req->r_private_item);
1621 if (ret >= 0)
1622 ceph_osdc_start_request(req->r_osdc, req);
1623 if (ret < 0) {
1624 req->r_result = ret;
1625 ceph_aio_complete_req(req);
1626 }
1627 }
1628 return -EIOCBQUEUED;
1629 }
1630
1631 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1632 ret = pos - iocb->ki_pos;
1633 iocb->ki_pos = pos;
1634 }
1635 return ret;
1636}
1637
1638/*
1639 * Synchronous write, straight from __user pointer or user pages.
1640 *
1641 * If the write spans an object boundary, just do multiple writes.
1642 * (For a correct atomic write we would need to e.g. take write locks
1643 * on all objects and roll back on failure.)
1644 */
1645static ssize_t
1646ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1647 struct ceph_snap_context *snapc)
1648{
1649 struct file *file = iocb->ki_filp;
1650 struct inode *inode = file_inode(file);
1651 struct ceph_inode_info *ci = ceph_inode(inode);
1652 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
1653 struct ceph_client *cl = fsc->client;
1654 struct ceph_osd_client *osdc = &fsc->client->osdc;
1655 struct ceph_osd_request *req;
1656 struct page **pages;
1657 u64 len;
1658 int num_pages;
1659 int written = 0;
1660 int ret;
1661 bool check_caps = false;
1662 struct timespec64 mtime = current_time(inode);
1663 size_t count = iov_iter_count(from);
1664
1665 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1666 return -EROFS;
1667
1668 doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos,
1669 (unsigned)count, snapc, snapc->seq);
1670
1671 ret = filemap_write_and_wait_range(inode->i_mapping,
1672 pos, pos + count - 1);
1673 if (ret < 0)
1674 return ret;
1675
1676 ceph_fscache_invalidate(inode, false);
1677
1678 while ((len = iov_iter_count(from)) > 0) {
1679 size_t left;
1680 int n;
1681 u64 write_pos = pos;
1682 u64 write_len = len;
1683 u64 objnum, objoff;
1684 u32 xlen;
1685 u64 assert_ver = 0;
1686 bool rmw;
1687 bool first, last;
1688 struct iov_iter saved_iter = *from;
1689 size_t off;
1690
1691 ceph_fscrypt_adjust_off_and_len(inode, &write_pos, &write_len);
1692
1693 /* clamp the length to the end of first object */
1694 ceph_calc_file_object_mapping(&ci->i_layout, write_pos,
1695 write_len, &objnum, &objoff,
1696 &xlen);
1697 write_len = xlen;
1698
1699 /* adjust len downward if it goes beyond current object */
1700 if (pos + len > write_pos + write_len)
1701 len = write_pos + write_len - pos;
1702
1703 /*
1704 * If we had to adjust the length or position to align with a
1705 * crypto block, then we must do a read/modify/write cycle. We
1706 * use a version assertion to redrive the thing if something
1707 * changes in between.
1708 */
1709 first = pos != write_pos;
1710 last = (pos + len) != (write_pos + write_len);
1711 rmw = first || last;
1712
1713 doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
1714 ci->i_vino.ino, pos, len, write_pos, write_len,
1715 rmw ? "" : "no ");
1716
1717		/*
1718		 * The data is placed into the pages just as it would be if
1719		 * they were an array of pagecache pages.
1720		 */
1721 num_pages = calc_pages_for(write_pos, write_len);
1722 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1723 if (IS_ERR(pages)) {
1724 ret = PTR_ERR(pages);
1725 break;
1726 }
1727
1728 /* Do we need to preload the pages? */
1729 if (rmw) {
1730 u64 first_pos = write_pos;
1731 u64 last_pos = (write_pos + write_len) - CEPH_FSCRYPT_BLOCK_SIZE;
1732 u64 read_len = CEPH_FSCRYPT_BLOCK_SIZE;
1733 struct ceph_osd_req_op *op;
1734
1735 /* We should only need to do this for encrypted inodes */
1736 WARN_ON_ONCE(!IS_ENCRYPTED(inode));
1737
1738			/* No need to do two reads if the first and last blocks are the same */
1739 if (first && last_pos == first_pos)
1740 last = false;
1741
1742 /*
1743 * Allocate a read request for one or two extents,
1744 * depending on how the request was aligned.
1745 */
1746 req = ceph_osdc_new_request(osdc, &ci->i_layout,
1747 ci->i_vino, first ? first_pos : last_pos,
1748 &read_len, 0, (first && last) ? 2 : 1,
1749 CEPH_OSD_OP_SPARSE_READ, CEPH_OSD_FLAG_READ,
1750 NULL, ci->i_truncate_seq,
1751 ci->i_truncate_size, false);
1752 if (IS_ERR(req)) {
1753 ceph_release_page_vector(pages, num_pages);
1754 ret = PTR_ERR(req);
1755 break;
1756 }
1757
1758 /* Something is misaligned! */
1759 if (read_len != CEPH_FSCRYPT_BLOCK_SIZE) {
1760 ceph_osdc_put_request(req);
1761 ceph_release_page_vector(pages, num_pages);
1762 ret = -EIO;
1763 break;
1764 }
1765
1766 /* Add extent for first block? */
1767 op = &req->r_ops[0];
1768
1769 if (first) {
1770 osd_req_op_extent_osd_data_pages(req, 0, pages,
1771 CEPH_FSCRYPT_BLOCK_SIZE,
1772 offset_in_page(first_pos),
1773 false, false);
1774 /* We only expect a single extent here */
1775 ret = __ceph_alloc_sparse_ext_map(op, 1);
1776 if (ret) {
1777 ceph_osdc_put_request(req);
1778 ceph_release_page_vector(pages, num_pages);
1779 break;
1780 }
1781 }
1782
1783 /* Add extent for last block */
1784 if (last) {
1785 /* Init the other extent if first extent has been used */
1786 if (first) {
1787 op = &req->r_ops[1];
1788 osd_req_op_extent_init(req, 1,
1789 CEPH_OSD_OP_SPARSE_READ,
1790 last_pos, CEPH_FSCRYPT_BLOCK_SIZE,
1791 ci->i_truncate_size,
1792 ci->i_truncate_seq);
1793 }
1794
1795 ret = __ceph_alloc_sparse_ext_map(op, 1);
1796 if (ret) {
1797 ceph_osdc_put_request(req);
1798 ceph_release_page_vector(pages, num_pages);
1799 break;
1800 }
1801
1802 osd_req_op_extent_osd_data_pages(req, first ? 1 : 0,
1803 &pages[num_pages - 1],
1804 CEPH_FSCRYPT_BLOCK_SIZE,
1805 offset_in_page(last_pos),
1806 false, false);
1807 }
1808
1809 ceph_osdc_start_request(osdc, req);
1810 ret = ceph_osdc_wait_request(osdc, req);
1811
1812 /* FIXME: length field is wrong if there are 2 extents */
1813 ceph_update_read_metrics(&fsc->mdsc->metric,
1814 req->r_start_latency,
1815 req->r_end_latency,
1816 read_len, ret);
1817
1818 /* Ok if object is not already present */
1819 if (ret == -ENOENT) {
1820 /*
1821 * If there is no object, then we can't assert
1822 * on its version. Set it to 0, and we'll use an
1823 * exclusive create instead.
1824 */
1825 ceph_osdc_put_request(req);
1826 ret = 0;
1827
1828 /*
1829 * zero out the soon-to-be uncopied parts of the
1830 * first and last pages.
1831 */
1832 if (first)
1833 zero_user_segment(pages[0], 0,
1834 offset_in_page(first_pos));
1835 if (last)
1836 zero_user_segment(pages[num_pages - 1],
1837 offset_in_page(last_pos),
1838 PAGE_SIZE);
1839 } else {
1840 if (ret < 0) {
1841 ceph_osdc_put_request(req);
1842 ceph_release_page_vector(pages, num_pages);
1843 break;
1844 }
1845
1846 op = &req->r_ops[0];
1847 if (op->extent.sparse_ext_cnt == 0) {
1848 if (first)
1849 zero_user_segment(pages[0], 0,
1850 offset_in_page(first_pos));
1851 else
1852 zero_user_segment(pages[num_pages - 1],
1853 offset_in_page(last_pos),
1854 PAGE_SIZE);
1855 } else if (op->extent.sparse_ext_cnt != 1 ||
1856 ceph_sparse_ext_map_end(op) !=
1857 CEPH_FSCRYPT_BLOCK_SIZE) {
1858 ret = -EIO;
1859 ceph_osdc_put_request(req);
1860 ceph_release_page_vector(pages, num_pages);
1861 break;
1862 }
1863
1864 if (first && last) {
1865 op = &req->r_ops[1];
1866 if (op->extent.sparse_ext_cnt == 0) {
1867 zero_user_segment(pages[num_pages - 1],
1868 offset_in_page(last_pos),
1869 PAGE_SIZE);
1870 } else if (op->extent.sparse_ext_cnt != 1 ||
1871 ceph_sparse_ext_map_end(op) !=
1872 CEPH_FSCRYPT_BLOCK_SIZE) {
1873 ret = -EIO;
1874 ceph_osdc_put_request(req);
1875 ceph_release_page_vector(pages, num_pages);
1876 break;
1877 }
1878 }
1879
1880 /* Grab assert version. It must be non-zero. */
1881 assert_ver = req->r_version;
1882 WARN_ON_ONCE(ret > 0 && assert_ver == 0);
1883
1884 ceph_osdc_put_request(req);
1885 if (first) {
1886 ret = ceph_fscrypt_decrypt_block_inplace(inode,
1887 pages[0], CEPH_FSCRYPT_BLOCK_SIZE,
1888 offset_in_page(first_pos),
1889 first_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
1890 if (ret < 0) {
1891 ceph_release_page_vector(pages, num_pages);
1892 break;
1893 }
1894 }
1895 if (last) {
1896 ret = ceph_fscrypt_decrypt_block_inplace(inode,
1897 pages[num_pages - 1],
1898 CEPH_FSCRYPT_BLOCK_SIZE,
1899 offset_in_page(last_pos),
1900 last_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
1901 if (ret < 0) {
1902 ceph_release_page_vector(pages, num_pages);
1903 break;
1904 }
1905 }
1906 }
1907 }
1908
1909 left = len;
1910 off = offset_in_page(pos);
1911 for (n = 0; n < num_pages; n++) {
1912 size_t plen = min_t(size_t, left, PAGE_SIZE - off);
1913
1914 /* copy the data */
1915 ret = copy_page_from_iter(pages[n], off, plen, from);
1916 if (ret != plen) {
1917 ret = -EFAULT;
1918 break;
1919 }
1920 off = 0;
1921 left -= ret;
1922 }
1923 if (ret < 0) {
1924 doutc(cl, "write failed with %d\n", ret);
1925 ceph_release_page_vector(pages, num_pages);
1926 break;
1927 }
1928
1929 if (IS_ENCRYPTED(inode)) {
1930 ret = ceph_fscrypt_encrypt_pages(inode, pages,
1931 write_pos, write_len,
1932 GFP_KERNEL);
1933 if (ret < 0) {
1934 doutc(cl, "encryption failed with %d\n", ret);
1935 ceph_release_page_vector(pages, num_pages);
1936 break;
1937 }
1938 }
1939
1940 req = ceph_osdc_new_request(osdc, &ci->i_layout,
1941 ci->i_vino, write_pos, &write_len,
1942 rmw ? 1 : 0, rmw ? 2 : 1,
1943 CEPH_OSD_OP_WRITE,
1944 CEPH_OSD_FLAG_WRITE,
1945 snapc, ci->i_truncate_seq,
1946 ci->i_truncate_size, false);
1947 if (IS_ERR(req)) {
1948 ret = PTR_ERR(req);
1949 ceph_release_page_vector(pages, num_pages);
1950 break;
1951 }
1952
1953 doutc(cl, "write op %lld~%llu\n", write_pos, write_len);
1954 osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
1955 offset_in_page(write_pos), false,
1956 true);
1957 req->r_inode = inode;
1958 req->r_mtime = mtime;
1959
1961 if (rmw) {
1962 /*
1963 * Set up the assertion. If we don't have a version
1964 * number, then the object doesn't exist yet. Use an
1965 * exclusive create instead of a version assertion in
1966 * that case.
1967 */
1968 if (assert_ver) {
1969 osd_req_op_init(req, 0, CEPH_OSD_OP_ASSERT_VER, 0);
1970 req->r_ops[0].assert_ver.ver = assert_ver;
1971 } else {
1972 osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE,
1973 CEPH_OSD_OP_FLAG_EXCL);
1974 }
1975 }
1976
1977 ceph_osdc_start_request(osdc, req);
1978 ret = ceph_osdc_wait_request(osdc, req);
1979
1980 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1981 req->r_end_latency, len, ret);
1982 ceph_osdc_put_request(req);
1983 if (ret != 0) {
1984 doutc(cl, "osd write returned %d\n", ret);
1985 /* Version changed! Must re-do the rmw cycle */
1986 if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
1987 (!assert_ver && ret == -EEXIST)) {
1988 /* We should only ever see this on a rmw */
1989 WARN_ON_ONCE(!rmw);
1990
1991 /* The version should never go backward */
1992 WARN_ON_ONCE(ret == -EOVERFLOW);
1993
1994 *from = saved_iter;
1995
1996 /* FIXME: limit number of times we loop? */
1997 continue;
1998 }
1999 ceph_set_error_write(ci);
2000 break;
2001 }
2002
2003 ceph_clear_error_write(ci);
2004
2005 /*
2006 * We successfully wrote to a range of the file. Declare
2007 * that region of the pagecache invalid.
2008 */
2009 ret = invalidate_inode_pages2_range(
2010 inode->i_mapping,
2011 pos >> PAGE_SHIFT,
2012 (pos + len - 1) >> PAGE_SHIFT);
2013 if (ret < 0) {
2014 doutc(cl, "invalidate_inode_pages2_range returned %d\n",
2015 ret);
2016 ret = 0;
2017 }
2018 pos += len;
2019 written += len;
2020 doutc(cl, "written %d\n", written);
2021 if (pos > i_size_read(inode)) {
2022 check_caps = ceph_inode_set_size(inode, pos);
2023 if (check_caps)
2024 ceph_check_caps(ceph_inode(inode),
2025 CHECK_CAPS_AUTHONLY);
2026 }
2027
2028 }
2029
2030 if (ret != -EOLDSNAPC && written > 0) {
2031 ret = written;
2032 iocb->ki_pos = pos;
2033 }
2034 doutc(cl, "returning %d\n", ret);
2035 return ret;
2036}
2037
2038/*
2039 * Wrap generic_file_read_iter with checks for cap bits on the inode.
2040 * Atomically grab references, so that those bits are not released
2041 * back to the MDS mid-read.
2042 *
2043 * Hmm, the sync read case isn't actually async... should it be?
2044 */
2045static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
2046{
2047 struct file *filp = iocb->ki_filp;
2048 struct ceph_file_info *fi = filp->private_data;
2049 size_t len = iov_iter_count(to);
2050 struct inode *inode = file_inode(filp);
2051 struct ceph_inode_info *ci = ceph_inode(inode);
2052 bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
2053 struct ceph_client *cl = ceph_inode_to_client(inode);
2054 ssize_t ret;
2055 int want = 0, got = 0;
2056 int retry_op = 0, read = 0;
2057
2058again:
2059 doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n",
2060 iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode));
2061
2062 if (ceph_inode_is_shutdown(inode))
2063 return -ESTALE;
2064
2065 if (direct_lock)
2066 ceph_start_io_direct(inode);
2067 else
2068 ceph_start_io_read(inode);
2069
2070 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
2071 want |= CEPH_CAP_FILE_CACHE;
2072 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2073 want |= CEPH_CAP_FILE_LAZYIO;
2074
2075 ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
2076 if (ret < 0) {
2077 if (direct_lock)
2078 ceph_end_io_direct(inode);
2079 else
2080 ceph_end_io_read(inode);
2081 return ret;
2082 }
2083
2084 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2085 (iocb->ki_flags & IOCB_DIRECT) ||
2086 (fi->flags & CEPH_F_SYNC)) {
2087
2088 doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n",
2089 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
2090 ceph_cap_string(got));
2091
2092 if (!ceph_has_inline_data(ci)) {
2093 if (!retry_op &&
2094 (iocb->ki_flags & IOCB_DIRECT) &&
2095 !IS_ENCRYPTED(inode)) {
2096 ret = ceph_direct_read_write(iocb, to,
2097 NULL, NULL);
2098 if (ret >= 0 && ret < len)
2099 retry_op = CHECK_EOF;
2100 } else {
2101 ret = ceph_sync_read(iocb, to, &retry_op);
2102 }
2103 } else {
2104 retry_op = READ_INLINE;
2105 }
2106 } else {
2107 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
2108 doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n",
2109 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
2110 ceph_cap_string(got));
2111 ceph_add_rw_context(fi, &rw_ctx);
2112 ret = generic_file_read_iter(iocb, to);
2113 ceph_del_rw_context(fi, &rw_ctx);
2114 }
2115
2116 doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n",
2117 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
2118 ceph_put_cap_refs(ci, got);
2119
2120 if (direct_lock)
2121 ceph_end_io_direct(inode);
2122 else
2123 ceph_end_io_read(inode);
2124
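	/*
	 * Handle the cases that need a second pass: fetching inline data
	 * via getattr (READ_INLINE), or reading more when a short read
	 * left us below i_size (CHECK_EOF, i.e. we hit a hole).
	 */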
2125 if (retry_op > HAVE_RETRIED && ret >= 0) {
2126 int statret;
2127 struct page *page = NULL;
2128 loff_t i_size;
2129 if (retry_op == READ_INLINE) {
2130 page = __page_cache_alloc(GFP_KERNEL);
2131 if (!page)
2132 return -ENOMEM;
2133 }
2134
2135 statret = __ceph_do_getattr(inode, page,
2136 CEPH_STAT_CAP_INLINE_DATA, !!page);
2137 if (statret < 0) {
2138 if (page)
2139 __free_page(page);
2140 if (statret == -ENODATA) {
2141 BUG_ON(retry_op != READ_INLINE);
2142 goto again;
2143 }
2144 return statret;
2145 }
2146
2147 i_size = i_size_read(inode);
2148 if (retry_op == READ_INLINE) {
2149 BUG_ON(ret > 0 || read > 0);
2150 if (iocb->ki_pos < i_size &&
2151 iocb->ki_pos < PAGE_SIZE) {
2152 loff_t end = min_t(loff_t, i_size,
2153 iocb->ki_pos + len);
2154 end = min_t(loff_t, end, PAGE_SIZE);
2155 if (statret < end)
2156 zero_user_segment(page, statret, end);
2157 ret = copy_page_to_iter(page,
2158 iocb->ki_pos & ~PAGE_MASK,
2159 end - iocb->ki_pos, to);
2160 iocb->ki_pos += ret;
2161 read += ret;
2162 }
2163 if (iocb->ki_pos < i_size && read < len) {
2164 size_t zlen = min_t(size_t, len - read,
2165 i_size - iocb->ki_pos);
2166 ret = iov_iter_zero(zlen, to);
2167 iocb->ki_pos += ret;
2168 read += ret;
2169 }
2170 __free_pages(page, 0);
2171 return read;
2172 }
2173
2174 /* hit EOF or hole? */
2175 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
2176 ret < len) {
2177 doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n",
2178 iocb->ki_pos, i_size);
2179
2180 read += ret;
2181 len -= ret;
2182 retry_op = HAVE_RETRIED;
2183 goto again;
2184 }
2185 }
2186
2187 if (ret >= 0)
2188 ret += read;
2189
2190 return ret;
2191}
2192
2193/*
2194 * Wrap filemap_splice_read with checks for cap bits on the inode.
2195 * Atomically grab references, so that those bits are not released
2196 * back to the MDS mid-read.
2197 */
2198static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
2199 struct pipe_inode_info *pipe,
2200 size_t len, unsigned int flags)
2201{
2202 struct ceph_file_info *fi = in->private_data;
2203 struct inode *inode = file_inode(in);
2204 struct ceph_inode_info *ci = ceph_inode(inode);
2205 ssize_t ret;
2206 int want = 0, got = 0;
2207 CEPH_DEFINE_RW_CONTEXT(rw_ctx, 0);
2208
2209 dout("splice_read %p %llx.%llx %llu~%zu trying to get caps on %p\n",
2210 inode, ceph_vinop(inode), *ppos, len, inode);
2211
2212 if (ceph_inode_is_shutdown(inode))
2213 return -ESTALE;
2214
2215 if (ceph_has_inline_data(ci) ||
2216 (fi->flags & CEPH_F_SYNC))
2217 return copy_splice_read(in, ppos, pipe, len, flags);
2218
2219 ceph_start_io_read(inode);
2220
2221 want = CEPH_CAP_FILE_CACHE;
2222 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2223 want |= CEPH_CAP_FILE_LAZYIO;
2224
2225 ret = ceph_get_caps(in, CEPH_CAP_FILE_RD, want, -1, &got);
2226 if (ret < 0)
2227 goto out_end;
2228
2229 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0) {
2230 dout("splice_read/sync %p %llx.%llx %llu~%zu got cap refs on %s\n",
2231 inode, ceph_vinop(inode), *ppos, len,
2232 ceph_cap_string(got));
2233
2234 ceph_put_cap_refs(ci, got);
2235 ceph_end_io_read(inode);
2236 return copy_splice_read(in, ppos, pipe, len, flags);
2237 }
2238
2239 dout("splice_read %p %llx.%llx %llu~%zu got cap refs on %s\n",
2240 inode, ceph_vinop(inode), *ppos, len, ceph_cap_string(got));
2241
2242 rw_ctx.caps = got;
2243 ceph_add_rw_context(fi, &rw_ctx);
2244 ret = filemap_splice_read(in, ppos, pipe, len, flags);
2245 ceph_del_rw_context(fi, &rw_ctx);
2246
2247 dout("splice_read %p %llx.%llx dropping cap refs on %s = %zd\n",
2248 inode, ceph_vinop(inode), ceph_cap_string(got), ret);
2249
2250 ceph_put_cap_refs(ci, got);
2251out_end:
2252 ceph_end_io_read(inode);
2253 return ret;
2254}
2255
2256/*
2257 * Take cap references to avoid releasing caps to MDS mid-write.
2258 *
2259 * If we are synchronous, and write with an old snap context, the OSD
2260 * may return EOLDSNAPC. In that case, retry the write _after_
2261 * dropping our cap refs and allowing the pending snap to logically
2262 * complete _before_ this write occurs.
2263 *
2264 * If we are near ENOSPC, write synchronously.
2265 */
2266static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
2267{
2268 struct file *file = iocb->ki_filp;
2269 struct ceph_file_info *fi = file->private_data;
2270 struct inode *inode = file_inode(file);
2271 struct ceph_inode_info *ci = ceph_inode(inode);
2272 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2273 struct ceph_client *cl = fsc->client;
2274 struct ceph_osd_client *osdc = &fsc->client->osdc;
2275 struct ceph_cap_flush *prealloc_cf;
2276 ssize_t count, written = 0;
2277 int err, want = 0, got;
2278 bool direct_lock = false;
2279 u32 map_flags;
2280 u64 pool_flags;
2281 loff_t pos;
2282 loff_t limit = max(i_size_read(inode), fsc->max_file_size);
2283
2284 if (ceph_inode_is_shutdown(inode))
2285 return -ESTALE;
2286
2287 if (ceph_snap(inode) != CEPH_NOSNAP)
2288 return -EROFS;
2289
2290 prealloc_cf = ceph_alloc_cap_flush();
2291 if (!prealloc_cf)
2292 return -ENOMEM;
2293
2294 if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
2295 direct_lock = true;
2296
2297retry_snap:
2298 if (direct_lock)
2299 ceph_start_io_direct(inode);
2300 else
2301 ceph_start_io_write(inode);
2302
2303 if (iocb->ki_flags & IOCB_APPEND) {
2304 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
2305 if (err < 0)
2306 goto out;
2307 }
2308
2309 err = generic_write_checks(iocb, from);
2310 if (err <= 0)
2311 goto out;
2312
2313 pos = iocb->ki_pos;
2314 if (unlikely(pos >= limit)) {
2315 err = -EFBIG;
2316 goto out;
2317 } else {
2318 iov_iter_truncate(from, limit - pos);
2319 }
2320
2321 count = iov_iter_count(from);
2322 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
2323 err = -EDQUOT;
2324 goto out;
2325 }
2326
2327 down_read(&osdc->lock);
2328 map_flags = osdc->osdmap->flags;
2329 pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
2330 up_read(&osdc->lock);
2331 if ((map_flags & CEPH_OSDMAP_FULL) ||
2332 (pool_flags & CEPH_POOL_FLAG_FULL)) {
2333 err = -ENOSPC;
2334 goto out;
2335 }
2336
2337 err = file_remove_privs(file);
2338 if (err)
2339 goto out;
2340
2341 doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
2342 inode, ceph_vinop(inode), pos, count,
2343 i_size_read(inode));
2344 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
2345 want |= CEPH_CAP_FILE_BUFFER;
2346 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2347 want |= CEPH_CAP_FILE_LAZYIO;
2348 got = 0;
2349 err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
2350 if (err < 0)
2351 goto out;
2352
2353 err = file_update_time(file);
2354 if (err)
2355 goto out_caps;
2356
2357 inode_inc_iversion_raw(inode);
2358
2359 doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n",
2360 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
2361
2362 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2363 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
2364 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
2365 struct ceph_snap_context *snapc;
2366 struct iov_iter data;
2367
2368 spin_lock(&ci->i_ceph_lock);
2369 if (__ceph_have_pending_cap_snap(ci)) {
2370 struct ceph_cap_snap *capsnap =
2371 list_last_entry(&ci->i_cap_snaps,
2372 struct ceph_cap_snap,
2373 ci_item);
2374 snapc = ceph_get_snap_context(capsnap->context);
2375 } else {
2376 BUG_ON(!ci->i_head_snapc);
2377 snapc = ceph_get_snap_context(ci->i_head_snapc);
2378 }
2379 spin_unlock(&ci->i_ceph_lock);
2380
2381 /* we might need to revert back to that point */
2382 data = *from;
2383 if ((iocb->ki_flags & IOCB_DIRECT) && !IS_ENCRYPTED(inode))
2384 written = ceph_direct_read_write(iocb, &data, snapc,
2385 &prealloc_cf);
2386 else
2387 written = ceph_sync_write(iocb, &data, pos, snapc);
2388 if (direct_lock)
2389 ceph_end_io_direct(inode);
2390 else
2391 ceph_end_io_write(inode);
2392 if (written > 0)
2393 iov_iter_advance(from, written);
2394 ceph_put_snap_context(snapc);
2395 } else {
2396		/*
2397		 * No need to acquire the i_truncate_mutex: the MDS
2398		 * revokes Fwb caps before sending a truncate message
2399		 * to us, and we can't get the Fwb cap while there is
2400		 * a pending vmtruncate. So write and vmtruncate
2401		 * cannot run at the same time.
2402		 */
2403 written = generic_perform_write(iocb, from);
2404 ceph_end_io_write(inode);
2405 }
2406
2407 if (written >= 0) {
2408 int dirty;
2409
2410 spin_lock(&ci->i_ceph_lock);
2411 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2412 &prealloc_cf);
2413 spin_unlock(&ci->i_ceph_lock);
2414 if (dirty)
2415 __mark_inode_dirty(inode, dirty);
2416 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
2417 ceph_check_caps(ci, CHECK_CAPS_FLUSH);
2418 }
2419
2420 doutc(cl, "%p %llx.%llx %llu~%u dropping cap refs on %s\n",
2421 inode, ceph_vinop(inode), pos, (unsigned)count,
2422 ceph_cap_string(got));
2423 ceph_put_cap_refs(ci, got);
2424
2425 if (written == -EOLDSNAPC) {
2426		doutc(cl, "%p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
2427 inode, ceph_vinop(inode), pos, (unsigned)count);
2428 goto retry_snap;
2429 }
2430
2431 if (written >= 0) {
2432 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
2433 (pool_flags & CEPH_POOL_FLAG_NEARFULL))
2434 iocb->ki_flags |= IOCB_DSYNC;
2435 written = generic_write_sync(iocb, written);
2436 }
2437
2438 goto out_unlocked;
2439out_caps:
2440 ceph_put_cap_refs(ci, got);
2441out:
2442 if (direct_lock)
2443 ceph_end_io_direct(inode);
2444 else
2445 ceph_end_io_write(inode);
2446out_unlocked:
2447 ceph_free_cap_flush(prealloc_cf);
2448 return written ? written : err;
2449}
2450
2451/*
2452 * llseek. Be sure to verify the file size on SEEK_END, SEEK_DATA and SEEK_HOLE.
2453 */
2454static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
2455{
2456 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
2457 struct inode *inode = file_inode(file);
2458 int ret;
2459
2460 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
2461 if (ret < 0)
2462 return ret;
2463 }
2464 return generic_file_llseek(file, offset, whence);
2465}
2466
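/*
 * Zero part of a single pagecache page, if it is present; pages not in
 * the cache are left alone.
 */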
2467static inline void ceph_zero_partial_page(
2468 struct inode *inode, loff_t offset, unsigned size)
2469{
2470 struct page *page;
2471 pgoff_t index = offset >> PAGE_SHIFT;
2472
2473 page = find_lock_page(inode->i_mapping, index);
2474 if (page) {
2475 wait_on_page_writeback(page);
2476 zero_user(page, offset & (PAGE_SIZE - 1), size);
2477 unlock_page(page);
2478 put_page(page);
2479 }
2480}
2481
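/*
 * Zero the pagecache over an arbitrary range: partial pages at either
 * end are zeroed in place, while whole pages in the middle are simply
 * truncated out of the cache.
 */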
2482static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
2483 loff_t length)
2484{
2485 loff_t nearly = round_up(offset, PAGE_SIZE);
2486 if (offset < nearly) {
2487 loff_t size = nearly - offset;
2488 if (length < size)
2489 size = length;
2490 ceph_zero_partial_page(inode, offset, size);
2491 offset += size;
2492 length -= size;
2493 }
2494 if (length >= PAGE_SIZE) {
2495 loff_t size = round_down(length, PAGE_SIZE);
2496 truncate_pagecache_range(inode, offset, offset + size - 1);
2497 offset += size;
2498 length -= size;
2499 }
2500 if (length)
2501 ceph_zero_partial_page(inode, offset, length);
2502}
2503
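/*
 * Zero a range that lies within a single RADOS object. A NULL length
 * zeroes the whole object: the object is deleted outright, except at
 * file offset 0 where a truncate is used instead.
 */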
2504static int ceph_zero_partial_object(struct inode *inode,
2505 loff_t offset, loff_t *length)
2506{
2507 struct ceph_inode_info *ci = ceph_inode(inode);
2508 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
2509 struct ceph_osd_request *req;
2510 int ret = 0;
2511 loff_t zero = 0;
2512 int op;
2513
2514 if (ceph_inode_is_shutdown(inode))
2515 return -EIO;
2516
2517 if (!length) {
2518 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2519 length = &zero;
2520 } else {
2521 op = CEPH_OSD_OP_ZERO;
2522 }
2523
2524 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2525 ceph_vino(inode),
2526 offset, length,
2527 0, 1, op,
2528 CEPH_OSD_FLAG_WRITE,
2529 NULL, 0, 0, false);
2530 if (IS_ERR(req)) {
2531 ret = PTR_ERR(req);
2532 goto out;
2533 }
2534
2535 req->r_mtime = inode_get_mtime(inode);
2536 ceph_osdc_start_request(&fsc->client->osdc, req);
2537 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2538 if (ret == -ENOENT)
2539 ret = 0;
2540 ceph_osdc_put_request(req);
2541
2542out:
2543 return ret;
2544}
2545
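/*
 * Zero a file range object by object: first any partial objects up to
 * the next period (object set) boundary, then whole object sets by
 * deleting/truncating every object in the set, then the remaining
 * tail.
 */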
2546static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2547{
2548 int ret = 0;
2549 struct ceph_inode_info *ci = ceph_inode(inode);
2550 s32 stripe_unit = ci->i_layout.stripe_unit;
2551 s32 stripe_count = ci->i_layout.stripe_count;
2552 s32 object_size = ci->i_layout.object_size;
2553	u64 object_set_size = (u64)object_size * stripe_count; /* avoid s32 overflow */
2554 u64 nearly, t;
2555
2556 /* round offset up to next period boundary */
2557 nearly = offset + object_set_size - 1;
2558 t = nearly;
2559 nearly -= do_div(t, object_set_size);
2560
2561 while (length && offset < nearly) {
2562 loff_t size = length;
2563 ret = ceph_zero_partial_object(inode, offset, &size);
2564 if (ret < 0)
2565 return ret;
2566 offset += size;
2567 length -= size;
2568 }
2569 while (length >= object_set_size) {
2570 int i;
2571 loff_t pos = offset;
2572 for (i = 0; i < stripe_count; ++i) {
2573 ret = ceph_zero_partial_object(inode, pos, NULL);
2574 if (ret < 0)
2575 return ret;
2576 pos += stripe_unit;
2577 }
2578 offset += object_set_size;
2579 length -= object_set_size;
2580 }
2581 while (length) {
2582 loff_t size = length;
2583 ret = ceph_zero_partial_object(inode, offset, &size);
2584 if (ret < 0)
2585 return ret;
2586 offset += size;
2587 length -= size;
2588 }
2589 return ret;
2590}
2591
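/*
 * fallocate: only hole punching within i_size (FALLOC_FL_PUNCH_HOLE |
 * FALLOC_FL_KEEP_SIZE) on plain regular files is supported. It is
 * implemented by zeroing the pagecache and the underlying objects.
 */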
2592static long ceph_fallocate(struct file *file, int mode,
2593 loff_t offset, loff_t length)
2594{
2595 struct ceph_file_info *fi = file->private_data;
2596 struct inode *inode = file_inode(file);
2597 struct ceph_inode_info *ci = ceph_inode(inode);
2598 struct ceph_cap_flush *prealloc_cf;
2599 struct ceph_client *cl = ceph_inode_to_client(inode);
2600 int want, got = 0;
2601 int dirty;
2602 int ret = 0;
2603 loff_t endoff = 0;
2604 loff_t size;
2605
2606 doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n",
2607 inode, ceph_vinop(inode), mode, offset, length);
2608
2609 if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2610 return -EOPNOTSUPP;
2611
2612 if (!S_ISREG(inode->i_mode))
2613 return -EOPNOTSUPP;
2614
2615 if (IS_ENCRYPTED(inode))
2616 return -EOPNOTSUPP;
2617
2618 prealloc_cf = ceph_alloc_cap_flush();
2619 if (!prealloc_cf)
2620 return -ENOMEM;
2621
2622 inode_lock(inode);
2623
2624 if (ceph_snap(inode) != CEPH_NOSNAP) {
2625 ret = -EROFS;
2626 goto unlock;
2627 }
2628
2629 size = i_size_read(inode);
2630
2631 /* Are we punching a hole beyond EOF? */
2632 if (offset >= size)
2633 goto unlock;
2634 if ((offset + length) > size)
2635 length = size - offset;
2636
2637 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2638 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2639 else
2640 want = CEPH_CAP_FILE_BUFFER;
2641
2642 ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2643 if (ret < 0)
2644 goto unlock;
2645
2646 ret = file_modified(file);
2647 if (ret)
2648 goto put_caps;
2649
2650 filemap_invalidate_lock(inode->i_mapping);
2651 ceph_fscache_invalidate(inode, false);
2652 ceph_zero_pagecache_range(inode, offset, length);
2653 ret = ceph_zero_objects(inode, offset, length);
2654
2655 if (!ret) {
2656 spin_lock(&ci->i_ceph_lock);
2657 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2658 &prealloc_cf);
2659 spin_unlock(&ci->i_ceph_lock);
2660 if (dirty)
2661 __mark_inode_dirty(inode, dirty);
2662 }
2663 filemap_invalidate_unlock(inode->i_mapping);
2664
2665put_caps:
2666 ceph_put_cap_refs(ci, got);
2667unlock:
2668 inode_unlock(inode);
2669 ceph_free_cap_flush(prealloc_cf);
2670 return ret;
2671}
2672
2673/*
2674 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2675 * src_ci. Two attempts are made to obtain both caps, and a negative error
2676 * is returned if this fails; a non-negative value is returned on success.
2677 */
2678static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2679 struct file *dst_filp,
2680 loff_t dst_endoff, int *dst_got)
2681{
2682 int ret = 0;
2683 bool retrying = false;
2684
2685retry_caps:
2686 ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2687 dst_endoff, dst_got);
2688 if (ret < 0)
2689 return ret;
2690
2691 /*
2692 * Since we're already holding the FILE_WR capability for the dst file,
2693 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
2694 * retry dance instead to try to get both capabilities.
2695 */
2696 ret = ceph_try_get_caps(file_inode(src_filp),
2697 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2698 false, src_got);
2699 if (ret <= 0) {
2700 /* Start by dropping dst_ci caps and getting src_ci caps */
2701 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2702 if (retrying) {
2703 if (!ret)
2704 /* ceph_try_get_caps masks EAGAIN */
2705 ret = -EAGAIN;
2706 return ret;
2707 }
2708 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2709 CEPH_CAP_FILE_SHARED, -1, src_got);
2710 if (ret < 0)
2711 return ret;
2712		/* ... drop src_ci caps too, and retry */
2713 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2714 retrying = true;
2715 goto retry_caps;
2716 }
2717 return ret;
2718}
2719
2720static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2721 struct ceph_inode_info *dst_ci, int dst_got)
2722{
2723 ceph_put_cap_refs(src_ci, src_got);
2724 ceph_put_cap_refs(dst_ci, dst_got);
2725}
2726
2727/*
2728 * This function does several size-related checks, returning an error if:
2729 * - source file is smaller than off+len
2730 * - destination file size is not OK (inode_newsize_ok())
2731 * - the max bytes quota is exceeded
2732 */
2733static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2734 loff_t src_off, loff_t dst_off, size_t len)
2735{
2736 struct ceph_client *cl = ceph_inode_to_client(src_inode);
2737 loff_t size, endoff;
2738
2739 size = i_size_read(src_inode);
2740 /*
2741 * Don't copy beyond source file EOF. Instead of simply setting length
2742 * to (size - src_off), just drop to VFS default implementation, as the
2743 * local i_size may be stale due to other clients writing to the source
2744 * inode.
2745 */
2746 if (src_off + len > size) {
2747 doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off,
2748 len, size);
2749 return -EOPNOTSUPP;
2750 }
2751 size = i_size_read(dst_inode);
2752
2753 endoff = dst_off + len;
2754 if (inode_newsize_ok(dst_inode, endoff))
2755 return -EOPNOTSUPP;
2756
2757 if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2758 return -EDQUOT;
2759
2760 return 0;
2761}
2762
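/*
 * Build a copy-from2 OSD request that copies one whole object from
 * src_oid to dst_oid on the server side, carrying the destination's
 * truncate_seq/size (CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ).
 */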
2763static struct ceph_osd_request *
2764ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2765 u64 src_snapid,
2766 struct ceph_object_id *src_oid,
2767 struct ceph_object_locator *src_oloc,
2768 struct ceph_object_id *dst_oid,
2769 struct ceph_object_locator *dst_oloc,
2770 u32 truncate_seq, u64 truncate_size)
2771{
2772 struct ceph_osd_request *req;
2773 int ret;
2774 u32 src_fadvise_flags =
2775 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2776 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2777 u32 dst_fadvise_flags =
2778 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2779 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2780
2781 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2782 if (!req)
2783 return ERR_PTR(-ENOMEM);
2784
2785 req->r_flags = CEPH_OSD_FLAG_WRITE;
2786
2787 ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2788 ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2789
2790 ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2791 src_oid, src_oloc,
2792 src_fadvise_flags,
2793 dst_fadvise_flags,
2794 truncate_seq,
2795 truncate_size,
2796 CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2797 if (ret)
2798 goto out;
2799
2800 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2801 if (ret)
2802 goto out;
2803
2804 return req;
2805
2806out:
2807 ceph_osdc_put_request(req);
2808 return ERR_PTR(ret);
2809}
2810
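/*
 * Copy len bytes object by object using server-side copy-from2,
 * advancing *src_off and *dst_off as objects complete. Returns the
 * number of bytes copied, or a negative error if nothing was copied.
 */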
2811static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2812 struct ceph_inode_info *dst_ci, u64 *dst_off,
2813 struct ceph_fs_client *fsc,
2814 size_t len, unsigned int flags)
2815{
2816 struct ceph_object_locator src_oloc, dst_oloc;
2817 struct ceph_object_id src_oid, dst_oid;
2818 struct ceph_osd_client *osdc;
2819 struct ceph_osd_request *req;
2820	ssize_t bytes = 0;
2821 u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2822 u32 src_objlen, dst_objlen;
2823 u32 object_size = src_ci->i_layout.object_size;
2824 struct ceph_client *cl = fsc->client;
2825 int ret;
2826
2827 src_oloc.pool = src_ci->i_layout.pool_id;
2828 src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2829 dst_oloc.pool = dst_ci->i_layout.pool_id;
2830 dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2831 osdc = &fsc->client->osdc;
2832
2833 while (len >= object_size) {
2834 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2835 object_size, &src_objnum,
2836 &src_objoff, &src_objlen);
2837 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2838 object_size, &dst_objnum,
2839 &dst_objoff, &dst_objlen);
2840 ceph_oid_init(&src_oid);
2841 ceph_oid_printf(&src_oid, "%llx.%08llx",
2842 src_ci->i_vino.ino, src_objnum);
2843 ceph_oid_init(&dst_oid);
2844 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2845 dst_ci->i_vino.ino, dst_objnum);
2846 /* Do an object remote copy */
2847 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2848 &src_oid, &src_oloc,
2849 &dst_oid, &dst_oloc,
2850 dst_ci->i_truncate_seq,
2851 dst_ci->i_truncate_size);
2852 if (IS_ERR(req))
2853 ret = PTR_ERR(req);
2854 else {
2855 ceph_osdc_start_request(osdc, req);
2856 ret = ceph_osdc_wait_request(osdc, req);
2857 ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2858 req->r_start_latency,
2859 req->r_end_latency,
2860 object_size, ret);
2861 ceph_osdc_put_request(req);
2862 }
2863 if (ret) {
2864 if (ret == -EOPNOTSUPP) {
2865 fsc->have_copy_from2 = false;
2866 pr_notice_client(cl,
2867 "OSDs don't support copy-from2; disabling copy offload\n");
2868 }
2869 doutc(cl, "returned %d\n", ret);
2870 if (!bytes)
2871 bytes = ret;
2872 goto out;
2873 }
2874 len -= object_size;
2875 bytes += object_size;
2876 *src_off += object_size;
2877 *dst_off += object_size;
2878 }
2879
2880out:
2881 ceph_oloc_destroy(&src_oloc);
2882 ceph_oloc_destroy(&dst_oloc);
2883 return bytes;
2884}
2885
2886static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2887 struct file *dst_file, loff_t dst_off,
2888 size_t len, unsigned int flags)
2889{
2890 struct inode *src_inode = file_inode(src_file);
2891 struct inode *dst_inode = file_inode(dst_file);
2892 struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2893 struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2894 struct ceph_cap_flush *prealloc_cf;
2895 struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
2896 struct ceph_client *cl = src_fsc->client;
2897 loff_t size;
2898 ssize_t ret = -EIO, bytes;
2899 u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2900 u32 src_objlen, dst_objlen;
2901 int src_got = 0, dst_got = 0, err, dirty;
2902
2903 if (src_inode->i_sb != dst_inode->i_sb) {
2904 struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
2905
2906 if (ceph_fsid_compare(&src_fsc->client->fsid,
2907 &dst_fsc->client->fsid)) {
2908 dout("Copying files across clusters: src: %pU dst: %pU\n",
2909 &src_fsc->client->fsid, &dst_fsc->client->fsid);
2910 return -EXDEV;
2911 }
2912 }
2913 if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2914 return -EROFS;
2915
2916 /*
2917 * Some of the checks below will return -EOPNOTSUPP, which will force a
2918 * fallback to the default VFS copy_file_range implementation. This is
2919	 * desirable in several cases (for example, when 'len' is smaller than
2920	 * the object size, or when the fallback would simply be more
2921 * efficient).
2922 */
2923
2924 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2925 return -EOPNOTSUPP;
2926
2927 if (!src_fsc->have_copy_from2)
2928 return -EOPNOTSUPP;
2929
2930 /*
2931 * Striped file layouts require that we copy partial objects, but the
2932 * OSD copy-from operation only supports full-object copies. Limit
2933 * this to non-striped file layouts for now.
2934 */
2935 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2936 (src_ci->i_layout.stripe_count != 1) ||
2937 (dst_ci->i_layout.stripe_count != 1) ||
2938 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2939 doutc(cl, "Invalid src/dst files layout\n");
2940 return -EOPNOTSUPP;
2941 }
2942
2943 /* Every encrypted inode gets its own key, so we can't offload them */
2944 if (IS_ENCRYPTED(src_inode) || IS_ENCRYPTED(dst_inode))
2945 return -EOPNOTSUPP;
2946
2947 if (len < src_ci->i_layout.object_size)
2948 return -EOPNOTSUPP; /* no remote copy will be done */
2949
2950 prealloc_cf = ceph_alloc_cap_flush();
2951 if (!prealloc_cf)
2952 return -ENOMEM;
2953
2954 /* Start by sync'ing the source and destination files */
2955 ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2956 if (ret < 0) {
2957 doutc(cl, "failed to write src file (%zd)\n", ret);
2958 goto out;
2959 }
2960 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2961 if (ret < 0) {
2962 doutc(cl, "failed to write dst file (%zd)\n", ret);
2963 goto out;
2964 }
2965
2966 /*
2967 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2968 * clients may have dirty data in their caches. And OSDs know nothing
2969 * about caps, so they can't safely do the remote object copies.
2970 */
2971 err = get_rd_wr_caps(src_file, &src_got,
2972 dst_file, (dst_off + len), &dst_got);
2973 if (err < 0) {
2974 doutc(cl, "get_rd_wr_caps returned %d\n", err);
2975 ret = -EOPNOTSUPP;
2976 goto out;
2977 }
2978
2979 ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2980 if (ret < 0)
2981 goto out_caps;
2982
2983 /* Drop dst file cached pages */
2984 ceph_fscache_invalidate(dst_inode, false);
2985 ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2986 dst_off >> PAGE_SHIFT,
2987 (dst_off + len) >> PAGE_SHIFT);
2988 if (ret < 0) {
2989 doutc(cl, "Failed to invalidate inode pages (%zd)\n",
2990 ret);
2991 ret = 0; /* XXX */
2992 }
2993 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2994 src_ci->i_layout.object_size,
2995 &src_objnum, &src_objoff, &src_objlen);
2996 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2997 dst_ci->i_layout.object_size,
2998 &dst_objnum, &dst_objoff, &dst_objlen);
2999	/* object-level offsets need to be the same */
3000 if (src_objoff != dst_objoff) {
3001 ret = -EOPNOTSUPP;
3002 goto out_caps;
3003 }
3004
3005 /*
3006 * Do a manual copy if the object offset isn't object aligned.
3007 * 'src_objlen' contains the bytes left until the end of the object,
3008	 * starting at src_off.
3009 */
3010 if (src_objoff) {
3011 doutc(cl, "Initial partial copy of %u bytes\n", src_objlen);
3012
3013 /*
3014		 * We need to temporarily drop all caps, as the splice
3015		 * fallback will itself take caps on both files again.
3016 */
3017 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
3018 ret = splice_file_range(src_file, &src_off, dst_file, &dst_off,
3019 src_objlen);
3020 /* Abort on short copies or on error */
3021 if (ret < (long)src_objlen) {
3022 doutc(cl, "Failed partial copy (%zd)\n", ret);
3023 goto out;
3024 }
3025 len -= ret;
3026 err = get_rd_wr_caps(src_file, &src_got,
3027 dst_file, (dst_off + len), &dst_got);
3028 if (err < 0)
3029 goto out;
3030 err = is_file_size_ok(src_inode, dst_inode,
3031 src_off, dst_off, len);
3032 if (err < 0)
3033 goto out_caps;
3034 }
3035
3036 size = i_size_read(dst_inode);
3037 bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
3038 src_fsc, len, flags);
3039 if (bytes <= 0) {
3040 if (!ret)
3041 ret = bytes;
3042 goto out_caps;
3043 }
3044 doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len);
3045 len -= bytes;
3046 ret += bytes;
3047
3048 file_update_time(dst_file);
3049 inode_inc_iversion_raw(dst_inode);
3050
3051 if (dst_off > size) {
3052 /* Let the MDS know about dst file size change */
3053 if (ceph_inode_set_size(dst_inode, dst_off) ||
3054 ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
3055 ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
3056 }
3057 /* Mark Fw dirty */
3058 spin_lock(&dst_ci->i_ceph_lock);
3059 dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
3060 spin_unlock(&dst_ci->i_ceph_lock);
3061 if (dirty)
3062 __mark_inode_dirty(dst_inode, dirty);
3063
3064out_caps:
3065 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
3066
3067 /*
3068 * Do the final manual copy if we still have some bytes left, unless
3069 * there were errors in remote object copies (len >= object_size).
3070 */
3071 if (len && (len < src_ci->i_layout.object_size)) {
3072 doutc(cl, "Final partial copy of %zu bytes\n", len);
3073 bytes = splice_file_range(src_file, &src_off, dst_file,
3074 &dst_off, len);
3075 if (bytes > 0)
3076 ret += bytes;
3077 else
3078 doutc(cl, "Failed partial copy (%zd)\n", bytes);
3079 }
3080
3081out:
3082 ceph_free_cap_flush(prealloc_cf);
3083
3084 return ret;
3085}
3086
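/*
 * copy_file_range entry point: try the copy-from2 offload first, and
 * fall back to a generic splice copy if it is unsupported or the files
 * live on different clusters (-EXDEV).
 */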
3087static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
3088 struct file *dst_file, loff_t dst_off,
3089 size_t len, unsigned int flags)
3090{
3091 ssize_t ret;
3092
3093 ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
3094 len, flags);
3095
3096 if (ret == -EOPNOTSUPP || ret == -EXDEV)
3097 ret = splice_copy_file_range(src_file, src_off, dst_file,
3098 dst_off, len);
3099 return ret;
3100}
3101
3102const struct file_operations ceph_file_fops = {
3103 .open = ceph_open,
3104 .release = ceph_release,
3105 .llseek = ceph_llseek,
3106 .read_iter = ceph_read_iter,
3107 .write_iter = ceph_write_iter,
3108 .mmap = ceph_mmap,
3109 .fsync = ceph_fsync,
3110 .lock = ceph_lock,
3111 .setlease = simple_nosetlease,
3112 .flock = ceph_flock,
3113 .splice_read = ceph_splice_read,
3114 .splice_write = iter_file_splice_write,
3115 .unlocked_ioctl = ceph_ioctl,
3116 .compat_ioctl = compat_ptr_ioctl,
3117 .fallocate = ceph_fallocate,
3118 .copy_file_range = ceph_copy_file_range,
3119};