#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 * - buffered uses the generic_file_aio_{read,write} helpers
 *
 * - synchronous is used when there is multi-client read/write
 *   sharing, avoids the page cache, and synchronously waits for an
 *   ack from the OSD.
 *
 * - direct io takes the variant of the sync path that references
 *   user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
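/*
 * Illustrative sketch (not part of the build): the read path below
 * picks between these modes roughly like this, based on the caps the
 * MDS granted and the open flags; see ceph_aio_read() for the real
 * logic.
 *
 *	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
 *	    (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC))
 *		take the sync/direct path, bypassing the page cache;
 *	else
 *		take the buffered path via generic_file_aio_read();
 */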

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* First file open request creates the cookie, we want to keep
		 * this cookie around for the lifetime of the inode so as not
		 * to have to worry about fscache register / revoke /
		 * operation races.
		 *
		 * Also, if we know the operation is going to invalidate data
		 * (non readonly) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
		/* fall through: dirs share the private data setup below */
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	if (flags & O_CREAT)
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	int err;

	dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;	/* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err)
		goto out_err;

	err = ceph_handle_snapdir(req, dentry, err);
	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_err;
	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_acl(dentry, dentry->d_inode, dir);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_err:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
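/*
 * Worked example (hypothetical layout, for illustration only): with a
 * 4 MB stripe unit, a 6 MB read at offset 0 crosses an object
 * boundary.  ceph_osdc_readpages() clamps this_len to 4 MB, so
 * hit_stripe is true and we loop for the remaining 2 MB.  If the OSD
 * instead returns fewer bytes than requested (was_short) while i_size
 * says more data should exist, the gap is zero-filled rather than
 * returned to the caller as a short read.
 */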
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	int io_align, page_align;
	int pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret >= 0) {
		int didpages;
		if (was_short && (pos + ret < inode->i_size)) {
			u64 tmp = min(this_len - ret,
				      inode->i_size - pos - ret);
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + tmp);
			ceph_zero_page_vector_range(page_align + read + ret,
						    tmp, pages);
			ret += tmp;
		}

		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit a stripe boundary and need to continue? */
		if (left && hit_stripe && pos < inode->i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = i->count;

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (file->f_flags & O_DIRECT) {
		while (iov_iter_count(i)) {
			void __user *data = i->iov[0].iov_base + i->iov_offset;
			size_t len = i->iov[0].iov_len - i->iov_offset;

			num_pages = calc_pages_for((unsigned long)data, len);
			pages = ceph_get_direct_page_vector(data,
							    num_pages, true);
			if (IS_ERR(pages))
				return PTR_ERR(pages);

			ret = striped_read(inode, off, len,
					   pages, num_pages, checkeof,
					   1, (unsigned long)data & ~PAGE_MASK);
			ceph_put_page_vector(pages, num_pages, true);

			if (ret <= 0)
				break;
			off += ret;
			iov_iter_advance(i, ret);
			if (ret < len)
				break;
		}
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		ret = striped_read(inode, off, len, pages,
				   num_pages, checkeof, 0, 0);
		if (ret > 0) {
			int l, k = 0;
			size_t left = len = ret;

			while (left) {
				void __user *data = i->iov[0].iov_base
							+ i->iov_offset;
				l = min(i->iov[0].iov_len - i->iov_offset,
					left);

				ret = ceph_copy_page_vector_to_user(&pages[k],
								    data, off,
								    l);
				if (ret > 0) {
					iov_iter_advance(i, ret);
					left -= ret;
					off += ret;
					k = calc_pages_for(iocb->ki_pos,
							   len - left + 1) - 1;
					BUG_ON(k >= num_pages && left);
				} else
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
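/*
 * Timeline sketch (illustrative): for a write sent with ACK|ONDISK,
 *
 *	start request  -> unsafe(req, true):  take an Fw cap ref, add
 *	                                      req to i_unsafe_writes
 *	ONDISK reply   -> unsafe(req, false): del req, drop the Fw ref
 *
 * so the Fw cap cannot be released to the MDS while an acked but not
 * yet committed write is still in flight.
 */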
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}


/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int page_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	loff_t pos = iocb->ki_pos;
	struct iov_iter i;

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_write on file %p %lld~%u\n", file, pos,
	     (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;

	iov_iter_init(&i, iov, nr_segs, count, 0);

	while (iov_iter_count(&i) > 0) {
		void __user *data = i.iov->iov_base + i.iov_offset;
		u64 len = i.iov->iov_len - i.iov_offset;

		page_align = (unsigned long)data & ~PAGE_MASK;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len,
					    2, /* include a 'startsync' command */
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		num_pages = calc_pages_for(page_align, len);
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
						 false, false);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_put_page_vector(pages, num_pages, false);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;
			iov_iter_advance(&i, (size_t)len);

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		iocb->ki_pos = pos;
		ret = written;
	}
	return ret;
}


/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	loff_t pos = iocb->ki_pos;
	struct iov_iter i;

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	iov_iter_init(&i, iov, nr_segs, count, 0);

	while ((len = iov_iter_count(&i)) > 0) {
		size_t left;
		int n;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
			iov_iter_advance(&i, ret);
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iocb->ki_nbytes;
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {
		struct iov_iter i;

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (!read) {
			ret = generic_segment_checks(iov, &nr_segs,
						     &len, VERIFY_WRITE);
			if (ret)
				goto out;
		}

		iov_iter_init(&i, iov, nr_segs, len, read);

		/* hmm, this isn't really async... */
		ret = ceph_sync_read(iocb, &i, &checkeof);
	} else {
		/*
		 * We can't modify the content of iov, so we have to
		 * restart the read from the beginning.
		 */
		if (read) {
			iocb->ki_pos = pos;
			len = iocb->ki_nbytes;
			read = 0;
		}
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
	}
out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode,
					      CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && iocb->ki_pos < inode->i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos,
			     inode->i_size);

			read += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	ssize_t count, written = 0;
	int err, want, got;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = file->f_mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		mutex_unlock(&inode->i_mutex);
		if (file->f_flags & O_DIRECT)
			written = ceph_sync_direct_write(iocb, iov,
							 nr_segs, count);
		else
			written = ceph_sync_write(iocb, iov, nr_segs, count);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)iov->iov_len);
			mutex_lock(&inode->i_mutex);
			goto retry_snap;
		}
	} else {
		loff_t old_size = inode->i_size;
		struct iov_iter from;
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we can't hold the Fwb cap while a
		 * vmtruncate is pending, so write and vmtruncate can
		 * never run at the same time.
		 */
		iov_iter_init(&from, iov, nr_segs, count, 0);
		written = generic_perform_write(file, &from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (inode->i_size > old_size)
			ceph_fscache_update_objectsize(inode);
		mutex_unlock(&inode->i_mutex);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	mutex_unlock(&inode->i_mutex);
out_unlocked:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (whence) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;  /* return the error, not the old offset */
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;  /* return the error, not the old offset */
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}

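/*
 * Example (assuming 4 KB pages): zeroing offset=1000, length=10000
 * below splits into a partial head [1000,4096) zeroed in place, one
 * whole page [4096,8192) that is simply truncated out of the page
 * cache, and a partial tail [8192,11000) zeroed in place.
 */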
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

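/*
 * Layout math example (hypothetical values, for illustration): with
 * object_size = 4 MB and stripe_count = 2, object_set_size is 8 MB.
 * Punching a hole starting at offset 5 MB first zeroes partial
 * objects up to the next period boundary (8 MB), then zeroes or
 * drops whole object sets 8 MB at a time, and finally handles any
 * partial tail.
 */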
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};

// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

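/*
 * O_* flag values are architecture-specific, so they cannot go over
 * the wire as-is; ceph_flags_sys2wire() below translates them to
 * CEPH_O_* values.  For example (sketch), O_WRONLY|O_CREAT maps to
 * CEPH_O_WRONLY|CEPH_O_CREAT, and any flag without a CEPH_* twin is
 * only reported via dout().
 */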
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

57 * Ceph file operations
58 *
59 * Implement basic open/close functionality, and implement
60 * read/write.
61 *
62 * We implement three modes of file I/O:
63 * - buffered uses the generic_file_aio_{read,write} helpers
64 *
65 * - synchronous is used when there is multi-client read/write
66 * sharing, avoids the page cache, and synchronously waits for an
67 * ack from the OSD.
68 *
69 * - direct io takes the variant of the sync path that references
70 * user pages directly.
71 *
72 * fsync() flushes and waits on dirty pages, but just queues metadata
73 * for writeback: since the MDS can recover size and mtime there is no
74 * need to wait for MDS acknowledgement.
75 */
76
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
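
/*
 * Usage sketch (illustrative): the direct I/O path pins user pages
 * once per OSD request and releases them when the request completes:
 *
 *	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 *	if (len < 0)
 *		return len;
 *	... issue an OSD request backed by bvecs ...
 *	put_bvecs(bvecs, num_pages, should_dirty);
 */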

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->meta_err = errseq_sample(&ci->i_meta_err);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
		       sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create.  We need caps, a file layout, and an
 * inode number, and either a lease on the dentry or complete dir info.
 * If any of those criteria are not satisfied, then return 0 and the
 * caller can go synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		struct dentry *dentry = req->r_dentry;
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		/* FIXME: start returning I/O errors on all accesses? */
		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);
	}

	if (req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		u64 ino = ceph_vino(req->r_target_inode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);
		mapping_set_error(req->r_target_inode->i_mapping, result);

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
		}
		ceph_kick_flushing_inode_caps(req->r_session, ci);
		spin_unlock(&ci->i_ceph_lock);
	} else {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
	iinfo.xattr_data = xattr_buf;
	memset(iinfo.xattr_data, 0, iinfo.xattr_len);

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1); // ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.mode = cpu_to_le32((u32)mode);
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
				       dir->i_gid : current_fsgid()));
	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);

	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
		     vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}

659/*
660 * Do a lookup + open with a single request. If we get a non-existent
661 * file or symlink, return 1 so the VFS can retry.
662 */
663int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
664 struct file *file, unsigned flags, umode_t mode)
665{
666 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
667 struct ceph_mds_client *mdsc = fsc->mdsc;
668 struct ceph_mds_request *req;
669 struct dentry *dn;
670 struct ceph_acl_sec_ctx as_ctx = {};
671 bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
672 int mask;
673 int err;
674
675 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
676 dir, dentry, dentry,
677 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
678
679 if (dentry->d_name.len > NAME_MAX)
680 return -ENAMETOOLONG;
681
682 if (flags & O_CREAT) {
683 if (ceph_quota_is_max_files_exceeded(dir))
684 return -EDQUOT;
685 err = ceph_pre_init_acls(dir, &mode, &as_ctx);
686 if (err < 0)
687 return err;
688 err = ceph_security_init_secctx(dentry, mode, &as_ctx);
689 if (err < 0)
690 goto out_ctx;
691 } else if (!d_in_lookup(dentry)) {
692 /* If it's not being looked up, it's negative */
693 return -ENOENT;
694 }
695retry:
696 /* do the open */
697 req = prepare_open_request(dir->i_sb, flags, mode);
698 if (IS_ERR(req)) {
699 err = PTR_ERR(req);
700 goto out_ctx;
701 }
702 req->r_dentry = dget(dentry);
703 req->r_num_caps = 2;
704 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
705 if (ceph_security_xattr_wanted(dir))
706 mask |= CEPH_CAP_XATTR_SHARED;
707 req->r_args.open.mask = cpu_to_le32(mask);
708 req->r_parent = dir;
709
710 if (flags & O_CREAT) {
711 struct ceph_file_layout lo;
712
713 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
714 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
715 if (as_ctx.pagelist) {
716 req->r_pagelist = as_ctx.pagelist;
717 as_ctx.pagelist = NULL;
718 }
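		/*
		 * Attempt an async create: if we hold the required
		 * directory caps and can delegate an inode number,
		 * finish the create locally and let the MDS catch up
		 * via r_callback.
		 */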
719 if (try_async &&
720 (req->r_dir_caps =
721 try_prep_async_create(dir, dentry, &lo,
722 &req->r_deleg_ino))) {
723 set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
724 req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
725 req->r_callback = ceph_async_create_cb;
726 err = ceph_mdsc_submit_request(mdsc, dir, req);
727 if (!err) {
728 err = ceph_finish_async_create(dir, dentry,
729 file, mode, req,
730 &as_ctx, &lo);
731 } else if (err == -EJUKEBOX) {
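				/* MDS can't do async creates now; retry synchronously */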
732 restore_deleg_ino(dir, req->r_deleg_ino);
733 ceph_mdsc_put_request(req);
734 try_async = false;
735 goto retry;
736 }
737 goto out_req;
738 }
739 }
740
741 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
742 err = ceph_mdsc_do_request(mdsc,
743 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
744 req);
745 err = ceph_handle_snapdir(req, dentry, err);
746 if (err)
747 goto out_req;
748
749 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
750 err = ceph_handle_notrace_create(dir, dentry);
751
752 if (d_in_lookup(dentry)) {
753 dn = ceph_finish_lookup(req, dentry, err);
754 if (IS_ERR(dn))
755 err = PTR_ERR(dn);
756 } else {
757 /* we were given a hashed negative dentry */
758 dn = NULL;
759 }
760 if (err)
761 goto out_req;
762 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
763 /* make vfs retry on splice, ENOENT, or symlink */
764 dout("atomic_open finish_no_open on dn %p\n", dn);
765 err = finish_no_open(file, dn);
766 } else {
767 dout("atomic_open finish_open on dn %p\n", dn);
768 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
769 struct inode *newino = d_inode(dentry);
770
771 cache_file_layout(dir, newino);
772 ceph_init_inode_acls(newino, &as_ctx);
773 file->f_mode |= FMODE_CREATED;
774 }
775 err = finish_open(file, dentry, ceph_open);
776 }
777out_req:
778 ceph_mdsc_put_request(req);
779out_ctx:
780 ceph_release_acl_sec_ctx(&as_ctx);
781 dout("atomic_open result=%d\n", err);
782 return err;
783}
784
785int ceph_release(struct inode *inode, struct file *file)
786{
787 struct ceph_inode_info *ci = ceph_inode(inode);
788
789 if (S_ISDIR(inode->i_mode)) {
790 struct ceph_dir_file_info *dfi = file->private_data;
791 dout("release inode %p dir file %p\n", inode, file);
792 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
793
794 ceph_put_fmode(ci, dfi->file_info.fmode, 1);
795
796 if (dfi->last_readdir)
797 ceph_mdsc_put_request(dfi->last_readdir);
798 kfree(dfi->last_name);
799 kfree(dfi->dir_info);
800 kmem_cache_free(ceph_dir_file_cachep, dfi);
801 } else {
802 struct ceph_file_info *fi = file->private_data;
803 dout("release inode %p regular file %p\n", inode, file);
804 WARN_ON(!list_empty(&fi->rw_contexts));
805
806 ceph_put_fmode(ci, fi->fmode, 1);
807
808 kmem_cache_free(ceph_file_cachep, fi);
809 }
810
811 /* wake up anyone waiting for caps on this inode */
812 wake_up_all(&ci->i_cap_wq);
813 return 0;
814}
815
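/*
 * retry_op values used by ceph_read_iter() to decide whether (and how)
 * to restart a read: re-check for EOF after a short read, or fall back
 * to fetching inline data with a getattr.
 */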
816enum {
817 HAVE_RETRIED = 1,
818 CHECK_EOF = 2,
819 READ_INLINE = 3,
820};
821
822/*
823 * Completely synchronous read and write methods. Direct from __user
824 * buffer to osd, or directly to user pages (if O_DIRECT).
825 *
826 * If the read spans an object boundary, just do multiple reads.
827 * (That's not atomic, but good enough for now.)
828 *
829 * If we get a short result from the OSD, check against i_size; we must
830 * only return a short read to the caller if we hit EOF.
831 */
832static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
833 int *retry_op)
834{
835 struct file *file = iocb->ki_filp;
836 struct inode *inode = file_inode(file);
837 struct ceph_inode_info *ci = ceph_inode(inode);
838 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
839 struct ceph_osd_client *osdc = &fsc->client->osdc;
840 ssize_t ret;
841 u64 off = iocb->ki_pos;
842 u64 len = iov_iter_count(to);
843
844 dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
845 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
846
847 if (!len)
848 return 0;
849 /*
850 * flush any page cache pages in this range. this
851 * will make concurrent normal and sync io slow,
852 * but it will at least behave sensibly when they are
853 * in sequence.
854 */
855 ret = filemap_write_and_wait_range(inode->i_mapping,
856 off, off + len - 1);
857 if (ret < 0)
858 return ret;
859
860 ret = 0;
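	/*
	 * Read one OSD request at a time; ceph_osdc_new_request() trims
	 * len to the object/stripe boundary, so a read that spans objects
	 * becomes several sequential requests.
	 */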
861 while ((len = iov_iter_count(to)) > 0) {
862 struct ceph_osd_request *req;
863 struct page **pages;
864 int num_pages;
865 size_t page_off;
866 u64 i_size;
867 bool more;
868
869 req = ceph_osdc_new_request(osdc, &ci->i_layout,
870 ci->i_vino, off, &len, 0, 1,
871 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
872 NULL, ci->i_truncate_seq,
873 ci->i_truncate_size, false);
874 if (IS_ERR(req)) {
875 ret = PTR_ERR(req);
876 break;
877 }
878
879 more = len < iov_iter_count(to);
880
881 if (unlikely(iov_iter_is_pipe(to))) {
882 ret = iov_iter_get_pages_alloc(to, &pages, len,
883 &page_off);
884 if (ret <= 0) {
885 ceph_osdc_put_request(req);
886				ret = ret ?: -ENOMEM; /* keep the real error, if any */
887 break;
888 }
889 num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
890 if (ret < len) {
891 len = ret;
892 osd_req_op_extent_update(req, 0, len);
893 more = false;
894 }
895 } else {
896 num_pages = calc_pages_for(off, len);
897 page_off = off & ~PAGE_MASK;
898 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
899 if (IS_ERR(pages)) {
900 ceph_osdc_put_request(req);
901 ret = PTR_ERR(pages);
902 break;
903 }
904 }
905
906 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
907 false, false);
908 ret = ceph_osdc_start_request(osdc, req, false);
909 if (!ret)
910 ret = ceph_osdc_wait_request(osdc, req);
911
912 ceph_update_read_latency(&fsc->mdsc->metric,
913 req->r_start_latency,
914 req->r_end_latency,
915 ret);
916
917 ceph_osdc_put_request(req);
918
919 i_size = i_size_read(inode);
920 dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
921 off, len, ret, i_size, (more ? " MORE" : ""));
922
923 if (ret == -ENOENT)
924 ret = 0;
925 if (ret >= 0 && ret < len && (off + ret < i_size)) {
926 int zlen = min(len - ret, i_size - off - ret);
927 int zoff = page_off + ret;
928 dout("sync_read zero gap %llu~%llu\n",
929 off + ret, off + ret + zlen);
930 ceph_zero_page_vector_range(zoff, zlen, pages);
931 ret += zlen;
932 }
933
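		/*
		 * Hand the data back to the caller: pipe iterators already
		 * reference the pages directly, so just advance; otherwise
		 * copy each page into the user iterator.
		 */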
934 if (unlikely(iov_iter_is_pipe(to))) {
935 if (ret > 0) {
936 iov_iter_advance(to, ret);
937 off += ret;
938 } else {
939 iov_iter_advance(to, 0);
940 }
941 ceph_put_page_vector(pages, num_pages, false);
942 } else {
943 int idx = 0;
944 size_t left = ret > 0 ? ret : 0;
945 while (left > 0) {
946 size_t len, copied;
947 page_off = off & ~PAGE_MASK;
948 len = min_t(size_t, left, PAGE_SIZE - page_off);
949 copied = copy_page_to_iter(pages[idx++],
950 page_off, len, to);
951 off += copied;
952 left -= copied;
953 if (copied < len) {
954 ret = -EFAULT;
955 break;
956 }
957 }
958 ceph_release_page_vector(pages, num_pages);
959 }
960
961 if (ret < 0) {
962 if (ret == -EBLACKLISTED)
963 fsc->blacklisted = true;
964 break;
965 }
966
967 if (off >= i_size || !more)
968 break;
969 }
970
971 if (off > iocb->ki_pos) {
972 if (ret >= 0 &&
973 iov_iter_count(to) > 0 && off >= i_size_read(inode))
974 *retry_op = CHECK_EOF;
975 ret = off - iocb->ki_pos;
976 iocb->ki_pos = off;
977 }
978
979 dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
980 return ret;
981}
982
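/*
 * State for an async direct I/O: one ceph_aio_request fans out into
 * num_reqs OSD requests, and the completion that drops pending_reqs to
 * zero finishes the iocb.
 */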
983struct ceph_aio_request {
984 struct kiocb *iocb;
985 size_t total_len;
986 bool write;
987 bool should_dirty;
988 int error;
989 struct list_head osd_reqs;
990 unsigned num_reqs;
991 atomic_t pending_reqs;
992 struct timespec64 mtime;
993 struct ceph_cap_flush *prealloc_cf;
994};
995
996struct ceph_aio_work {
997 struct work_struct work;
998 struct ceph_osd_request *req;
999};
1000
1001static void ceph_aio_retry_work(struct work_struct *work);
1002
1003static void ceph_aio_complete(struct inode *inode,
1004 struct ceph_aio_request *aio_req)
1005{
1006 struct ceph_inode_info *ci = ceph_inode(inode);
1007 int ret;
1008
1009 if (!atomic_dec_and_test(&aio_req->pending_reqs))
1010 return;
1011
1012 if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1013 inode_dio_end(inode);
1014
1015 ret = aio_req->error;
1016 if (!ret)
1017 ret = aio_req->total_len;
1018
1019 dout("ceph_aio_complete %p rc %d\n", inode, ret);
1020
1021 if (ret >= 0 && aio_req->write) {
1022 int dirty;
1023
1024 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1025 if (endoff > i_size_read(inode)) {
1026 if (ceph_inode_set_size(inode, endoff))
1027 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1028 }
1029
1030 spin_lock(&ci->i_ceph_lock);
1031 ci->i_inline_version = CEPH_INLINE_NONE;
1032 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1033 &aio_req->prealloc_cf);
1034 spin_unlock(&ci->i_ceph_lock);
1035 if (dirty)
1036 __mark_inode_dirty(inode, dirty);
1037
1038 }
1039
1040 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1041 CEPH_CAP_FILE_RD));
1042
1043 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
1044
1045 ceph_free_cap_flush(aio_req->prealloc_cf);
1046 kfree(aio_req);
1047}
1048
1049static void ceph_aio_complete_req(struct ceph_osd_request *req)
1050{
1051 int rc = req->r_result;
1052 struct inode *inode = req->r_inode;
1053 struct ceph_aio_request *aio_req = req->r_priv;
1054 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1055 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1056 struct ceph_client_metric *metric = &fsc->mdsc->metric;
1057
1058 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1059 BUG_ON(!osd_data->num_bvecs);
1060
1061 dout("ceph_aio_complete_req %p rc %d bytes %u\n",
1062 inode, rc, osd_data->bvec_pos.iter.bi_size);
1063
1064 /* r_start_latency == 0 means the request was not submitted */
1065 if (req->r_start_latency) {
1066 if (aio_req->write)
1067 ceph_update_write_latency(metric, req->r_start_latency,
1068 req->r_end_latency, rc);
1069 else
1070 ceph_update_read_latency(metric, req->r_start_latency,
1071 req->r_end_latency, rc);
1072 }
1073
1074 if (rc == -EOLDSNAPC) {
1075 struct ceph_aio_work *aio_work;
1076 BUG_ON(!aio_req->write);
1077
1078 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1079 if (aio_work) {
1080 INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1081 aio_work->req = req;
1082 queue_work(ceph_inode_to_client(inode)->inode_wq,
1083 &aio_work->work);
1084 return;
1085 }
1086 rc = -ENOMEM;
1087 } else if (!aio_req->write) {
1088 if (rc == -ENOENT)
1089 rc = 0;
1090 if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
1091 struct iov_iter i;
1092 int zlen = osd_data->bvec_pos.iter.bi_size - rc;
1093
1094 /*
1095			 * If the read was satisfied by a single OSD
1096			 * request, it may extend past EOF. Otherwise
1097			 * the read is bounded by i_size.
1098 */
1099 if (aio_req->num_reqs == 1) {
1100 loff_t i_size = i_size_read(inode);
1101 loff_t endoff = aio_req->iocb->ki_pos + rc;
1102 if (endoff < i_size)
1103 zlen = min_t(size_t, zlen,
1104 i_size - endoff);
1105 aio_req->total_len = rc + zlen;
1106 }
1107
1108 iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
1109 osd_data->num_bvecs,
1110 osd_data->bvec_pos.iter.bi_size);
1111 iov_iter_advance(&i, rc);
1112 iov_iter_zero(zlen, &i);
1113 }
1114 }
1115
1116 put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1117 aio_req->should_dirty);
1118 ceph_osdc_put_request(req);
1119
1120 if (rc < 0)
1121 cmpxchg(&aio_req->error, 0, rc);
1122
1123 ceph_aio_complete(inode, aio_req);
1124 return;
1125}
1126
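/*
 * A write raced with a snapshot and came back with -EOLDSNAPC: rebuild
 * the request against the most recent snap context and resend it. Runs
 * from a workqueue because the resend may sleep.
 */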
1127static void ceph_aio_retry_work(struct work_struct *work)
1128{
1129 struct ceph_aio_work *aio_work =
1130 container_of(work, struct ceph_aio_work, work);
1131 struct ceph_osd_request *orig_req = aio_work->req;
1132 struct ceph_aio_request *aio_req = orig_req->r_priv;
1133 struct inode *inode = orig_req->r_inode;
1134 struct ceph_inode_info *ci = ceph_inode(inode);
1135 struct ceph_snap_context *snapc;
1136 struct ceph_osd_request *req;
1137 int ret;
1138
1139 spin_lock(&ci->i_ceph_lock);
1140 if (__ceph_have_pending_cap_snap(ci)) {
1141 struct ceph_cap_snap *capsnap =
1142 list_last_entry(&ci->i_cap_snaps,
1143 struct ceph_cap_snap,
1144 ci_item);
1145 snapc = ceph_get_snap_context(capsnap->context);
1146 } else {
1147 BUG_ON(!ci->i_head_snapc);
1148 snapc = ceph_get_snap_context(ci->i_head_snapc);
1149 }
1150 spin_unlock(&ci->i_ceph_lock);
1151
1152 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1153 false, GFP_NOFS);
1154 if (!req) {
1155 ret = -ENOMEM;
1156 req = orig_req;
1157 goto out;
1158 }
1159
1160 req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1161 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1162 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1163
1164 req->r_ops[0] = orig_req->r_ops[0];
1165
1166 req->r_mtime = aio_req->mtime;
1167 req->r_data_offset = req->r_ops[0].extent.offset;
1168
1169 ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1170 if (ret) {
1171 ceph_osdc_put_request(req);
1172 req = orig_req;
1173 goto out;
1174 }
1175
1176 ceph_osdc_put_request(orig_req);
1177
1178 req->r_callback = ceph_aio_complete_req;
1179 req->r_inode = inode;
1180 req->r_priv = aio_req;
1181
1182 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1183out:
1184 if (ret < 0) {
1185 req->r_result = ret;
1186 ceph_aio_complete_req(req);
1187 }
1188
1189 ceph_put_snap_context(snapc);
1190 kfree(aio_work);
1191}
1192
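/*
 * Direct I/O read/write: map the user iterator into bvecs and issue
 * OSD requests that bypass the page cache. Synchronous kiocbs wait on
 * each request in turn; otherwise the requests are queued on a
 * ceph_aio_request and completed asynchronously.
 */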
1193static ssize_t
1194ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1195 struct ceph_snap_context *snapc,
1196 struct ceph_cap_flush **pcf)
1197{
1198 struct file *file = iocb->ki_filp;
1199 struct inode *inode = file_inode(file);
1200 struct ceph_inode_info *ci = ceph_inode(inode);
1201 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1202 struct ceph_client_metric *metric = &fsc->mdsc->metric;
1203 struct ceph_vino vino;
1204 struct ceph_osd_request *req;
1205 struct bio_vec *bvecs;
1206 struct ceph_aio_request *aio_req = NULL;
1207 int num_pages = 0;
1208 int flags;
1209 int ret = 0;
1210 struct timespec64 mtime = current_time(inode);
1211 size_t count = iov_iter_count(iter);
1212 loff_t pos = iocb->ki_pos;
1213 bool write = iov_iter_rw(iter) == WRITE;
1214 bool should_dirty = !write && iter_is_iovec(iter);
1215
1216 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1217 return -EROFS;
1218
1219 dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1220 (write ? "write" : "read"), file, pos, (unsigned)count,
1221 snapc, snapc ? snapc->seq : 0);
1222
1223 if (write) {
1224 int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1225 pos >> PAGE_SHIFT,
1226 (pos + count - 1) >> PAGE_SHIFT);
1227 if (ret2 < 0)
1228 dout("invalidate_inode_pages2_range returned %d\n", ret2);
1229
1230 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1231 } else {
1232 flags = CEPH_OSD_FLAG_READ;
1233 }
1234
1235 while (iov_iter_count(iter) > 0) {
1236 u64 size = iov_iter_count(iter);
1237 ssize_t len;
1238
1239 if (write)
1240 size = min_t(u64, size, fsc->mount_options->wsize);
1241 else
1242 size = min_t(u64, size, fsc->mount_options->rsize);
1243
1244 vino = ceph_vino(inode);
1245 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1246 vino, pos, &size, 0,
1247 1,
1248 write ? CEPH_OSD_OP_WRITE :
1249 CEPH_OSD_OP_READ,
1250 flags, snapc,
1251 ci->i_truncate_seq,
1252 ci->i_truncate_size,
1253 false);
1254 if (IS_ERR(req)) {
1255 ret = PTR_ERR(req);
1256 break;
1257 }
1258
1259 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1260 if (len < 0) {
1261 ceph_osdc_put_request(req);
1262 ret = len;
1263 break;
1264 }
1265 if (len != size)
1266 osd_req_op_extent_update(req, 0, len);
1267
1268 /*
1269		 * To simplify error handling, only allow AIO when the IO is
1270		 * within i_size or can be satisfied by a single OSD request.
1271 */
1272 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1273 (len == count || pos + count <= i_size_read(inode))) {
1274 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1275 if (aio_req) {
1276 aio_req->iocb = iocb;
1277 aio_req->write = write;
1278 aio_req->should_dirty = should_dirty;
1279 INIT_LIST_HEAD(&aio_req->osd_reqs);
1280 if (write) {
1281 aio_req->mtime = mtime;
1282 swap(aio_req->prealloc_cf, *pcf);
1283 }
1284 }
1285 /* ignore error */
1286 }
1287
1288 if (write) {
1289 /*
1290 * throw out any page cache pages in this range. this
1291 * may block.
1292 */
1293 truncate_inode_pages_range(inode->i_mapping, pos,
1294 PAGE_ALIGN(pos + len) - 1);
1295
1296 req->r_mtime = mtime;
1297 }
1298
1299 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1300
1301 if (aio_req) {
1302 aio_req->total_len += len;
1303 aio_req->num_reqs++;
1304 atomic_inc(&aio_req->pending_reqs);
1305
1306 req->r_callback = ceph_aio_complete_req;
1307 req->r_inode = inode;
1308 req->r_priv = aio_req;
1309 list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1310
1311 pos += len;
1312 continue;
1313 }
1314
1315 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1316 if (!ret)
1317 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1318
1319 if (write)
1320 ceph_update_write_latency(metric, req->r_start_latency,
1321 req->r_end_latency, ret);
1322 else
1323 ceph_update_read_latency(metric, req->r_start_latency,
1324 req->r_end_latency, ret);
1325
1326 size = i_size_read(inode);
1327 if (!write) {
1328 if (ret == -ENOENT)
1329 ret = 0;
1330 if (ret >= 0 && ret < len && pos + ret < size) {
1331 struct iov_iter i;
1332 int zlen = min_t(size_t, len - ret,
1333 size - pos - ret);
1334
1335 iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1336 iov_iter_advance(&i, ret);
1337 iov_iter_zero(zlen, &i);
1338 ret += zlen;
1339 }
1340 if (ret >= 0)
1341 len = ret;
1342 }
1343
1344 put_bvecs(bvecs, num_pages, should_dirty);
1345 ceph_osdc_put_request(req);
1346 if (ret < 0)
1347 break;
1348
1349 pos += len;
1350 if (!write && pos >= size)
1351 break;
1352
1353 if (write && pos > size) {
1354 if (ceph_inode_set_size(inode, pos))
1355 ceph_check_caps(ceph_inode(inode),
1356 CHECK_CAPS_AUTHONLY,
1357 NULL);
1358 }
1359 }
1360
1361 if (aio_req) {
1362 LIST_HEAD(osd_reqs);
1363
1364 if (aio_req->num_reqs == 0) {
1365 kfree(aio_req);
1366 return ret;
1367 }
1368
1369 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1370 CEPH_CAP_FILE_RD);
1371
1372 list_splice(&aio_req->osd_reqs, &osd_reqs);
1373 inode_dio_begin(inode);
1374 while (!list_empty(&osd_reqs)) {
1375 req = list_first_entry(&osd_reqs,
1376 struct ceph_osd_request,
1377 r_private_item);
1378 list_del_init(&req->r_private_item);
1379 if (ret >= 0)
1380 ret = ceph_osdc_start_request(req->r_osdc,
1381 req, false);
1382 if (ret < 0) {
1383 req->r_result = ret;
1384 ceph_aio_complete_req(req);
1385 }
1386 }
1387 return -EIOCBQUEUED;
1388 }
1389
1390 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1391 ret = pos - iocb->ki_pos;
1392 iocb->ki_pos = pos;
1393 }
1394 return ret;
1395}
1396
1397/*
1398 * Synchronous write, straight from __user pointer or user pages.
1399 *
1400 * If write spans object boundary, just do multiple writes. (For a
1401 * correct atomic write, we should e.g. take write locks on all
1402 * objects, rollback on failure, etc.)
1403 */
1404static ssize_t
1405ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1406 struct ceph_snap_context *snapc)
1407{
1408 struct file *file = iocb->ki_filp;
1409 struct inode *inode = file_inode(file);
1410 struct ceph_inode_info *ci = ceph_inode(inode);
1411 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1412 struct ceph_vino vino;
1413 struct ceph_osd_request *req;
1414 struct page **pages;
1415 u64 len;
1416 int num_pages;
1417 int written = 0;
1418 int flags;
1419 int ret;
1420 bool check_caps = false;
1421 struct timespec64 mtime = current_time(inode);
1422 size_t count = iov_iter_count(from);
1423
1424 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1425 return -EROFS;
1426
1427 dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1428 file, pos, (unsigned)count, snapc, snapc->seq);
1429
1430 ret = filemap_write_and_wait_range(inode->i_mapping,
1431 pos, pos + count - 1);
1432 if (ret < 0)
1433 return ret;
1434
1435 ret = invalidate_inode_pages2_range(inode->i_mapping,
1436 pos >> PAGE_SHIFT,
1437 (pos + count - 1) >> PAGE_SHIFT);
1438 if (ret < 0)
1439 dout("invalidate_inode_pages2_range returned %d\n", ret);
1440
1441 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1442
1443 while ((len = iov_iter_count(from)) > 0) {
1444 size_t left;
1445 int n;
1446
1447 vino = ceph_vino(inode);
1448 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1449 vino, pos, &len, 0, 1,
1450 CEPH_OSD_OP_WRITE, flags, snapc,
1451 ci->i_truncate_seq,
1452 ci->i_truncate_size,
1453 false);
1454 if (IS_ERR(req)) {
1455 ret = PTR_ERR(req);
1456 break;
1457 }
1458
1459 /*
1460 * write from beginning of first page,
1461 * regardless of io alignment
1462 */
1463 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1464
1465 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1466 if (IS_ERR(pages)) {
1467 ret = PTR_ERR(pages);
1468 goto out;
1469 }
1470
1471 left = len;
1472 for (n = 0; n < num_pages; n++) {
1473 size_t plen = min_t(size_t, left, PAGE_SIZE);
1474 ret = copy_page_from_iter(pages[n], 0, plen, from);
1475 if (ret != plen) {
1476 ret = -EFAULT;
1477 break;
1478 }
1479 left -= ret;
1480 }
1481
1482 if (ret < 0) {
1483 ceph_release_page_vector(pages, num_pages);
1484 goto out;
1485 }
1486
1487 req->r_inode = inode;
1488
1489 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1490 false, true);
1491
1492 req->r_mtime = mtime;
1493 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1494 if (!ret)
1495 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1496
1497 ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
1498 req->r_end_latency, ret);
1499out:
1500 ceph_osdc_put_request(req);
1501 if (ret != 0) {
1502 ceph_set_error_write(ci);
1503 break;
1504 }
1505
1506 ceph_clear_error_write(ci);
1507 pos += len;
1508 written += len;
1509 if (pos > i_size_read(inode)) {
1510 check_caps = ceph_inode_set_size(inode, pos);
1511 if (check_caps)
1512 ceph_check_caps(ceph_inode(inode),
1513 CHECK_CAPS_AUTHONLY,
1514 NULL);
1515 }
1516
1517 }
1518
1519 if (ret != -EOLDSNAPC && written > 0) {
1520 ret = written;
1521 iocb->ki_pos = pos;
1522 }
1523 return ret;
1524}
1525
1526/*
1527 * Wrap generic_file_read_iter with checks for cap bits on the inode.
1528 * Atomically grab references, so that those bits are not released
1529 * back to the MDS mid-read.
1530 *
1531 * Hmm, the sync read case isn't actually async... should it be?
1532 */
1533static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1534{
1535 struct file *filp = iocb->ki_filp;
1536 struct ceph_file_info *fi = filp->private_data;
1537 size_t len = iov_iter_count(to);
1538 struct inode *inode = file_inode(filp);
1539 struct ceph_inode_info *ci = ceph_inode(inode);
1540 struct page *pinned_page = NULL;
1541 bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1542 ssize_t ret;
1543 int want, got = 0;
1544 int retry_op = 0, read = 0;
1545
1546again:
1547 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1548 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1549
1550 if (direct_lock)
1551 ceph_start_io_direct(inode);
1552 else
1553 ceph_start_io_read(inode);
1554
1555 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1556 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1557 else
1558 want = CEPH_CAP_FILE_CACHE;
1559 ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
1560 &got, &pinned_page);
1561 if (ret < 0) {
1562 if (iocb->ki_flags & IOCB_DIRECT)
1563 ceph_end_io_direct(inode);
1564 else
1565 ceph_end_io_read(inode);
1566 return ret;
1567 }
1568
1569 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1570 (iocb->ki_flags & IOCB_DIRECT) ||
1571 (fi->flags & CEPH_F_SYNC)) {
1572
1573 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1574 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1575 ceph_cap_string(got));
1576
1577 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1578 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1579 ret = ceph_direct_read_write(iocb, to,
1580 NULL, NULL);
1581 if (ret >= 0 && ret < len)
1582 retry_op = CHECK_EOF;
1583 } else {
1584 ret = ceph_sync_read(iocb, to, &retry_op);
1585 }
1586 } else {
1587 retry_op = READ_INLINE;
1588 }
1589 } else {
1590 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1591 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1592 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1593 ceph_cap_string(got));
1594 ceph_add_rw_context(fi, &rw_ctx);
1595 ret = generic_file_read_iter(iocb, to);
1596 ceph_del_rw_context(fi, &rw_ctx);
1597 }
1598
1599 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1600 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1601 if (pinned_page) {
1602 put_page(pinned_page);
1603 pinned_page = NULL;
1604 }
1605 ceph_put_cap_refs(ci, got);
1606
1607 if (direct_lock)
1608 ceph_end_io_direct(inode);
1609 else
1610 ceph_end_io_read(inode);
1611
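	/*
	 * Retry handling runs after the cap refs are dropped: READ_INLINE
	 * fetches the inline blob via getattr and copies it out by hand,
	 * while CHECK_EOF re-takes caps and reads again when the short
	 * read turned out to be a hole rather than EOF.
	 */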
1612 if (retry_op > HAVE_RETRIED && ret >= 0) {
1613 int statret;
1614 struct page *page = NULL;
1615 loff_t i_size;
1616 if (retry_op == READ_INLINE) {
1617 page = __page_cache_alloc(GFP_KERNEL);
1618 if (!page)
1619 return -ENOMEM;
1620 }
1621
1622 statret = __ceph_do_getattr(inode, page,
1623 CEPH_STAT_CAP_INLINE_DATA, !!page);
1624 if (statret < 0) {
1625 if (page)
1626 __free_page(page);
1627 if (statret == -ENODATA) {
1628 BUG_ON(retry_op != READ_INLINE);
1629 goto again;
1630 }
1631 return statret;
1632 }
1633
1634 i_size = i_size_read(inode);
1635 if (retry_op == READ_INLINE) {
1636 BUG_ON(ret > 0 || read > 0);
1637 if (iocb->ki_pos < i_size &&
1638 iocb->ki_pos < PAGE_SIZE) {
1639 loff_t end = min_t(loff_t, i_size,
1640 iocb->ki_pos + len);
1641 end = min_t(loff_t, end, PAGE_SIZE);
1642 if (statret < end)
1643 zero_user_segment(page, statret, end);
1644 ret = copy_page_to_iter(page,
1645 iocb->ki_pos & ~PAGE_MASK,
1646 end - iocb->ki_pos, to);
1647 iocb->ki_pos += ret;
1648 read += ret;
1649 }
1650 if (iocb->ki_pos < i_size && read < len) {
1651 size_t zlen = min_t(size_t, len - read,
1652 i_size - iocb->ki_pos);
1653 ret = iov_iter_zero(zlen, to);
1654 iocb->ki_pos += ret;
1655 read += ret;
1656 }
1657 __free_pages(page, 0);
1658 return read;
1659 }
1660
1661 /* hit EOF or hole? */
1662 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1663 ret < len) {
1664 dout("sync_read hit hole, ppos %lld < size %lld"
1665 ", reading more\n", iocb->ki_pos, i_size);
1666
1667 read += ret;
1668 len -= ret;
1669 retry_op = HAVE_RETRIED;
1670 goto again;
1671 }
1672 }
1673
1674 if (ret >= 0)
1675 ret += read;
1676
1677 return ret;
1678}
1679
1680/*
1681 * Take cap references to avoid releasing caps to MDS mid-write.
1682 *
1683 * If we are synchronous, and write with an old snap context, the OSD
1684 * may return EOLDSNAPC. In that case, retry the write _after_
1685 * dropping our cap refs and allowing the pending snap to logically
1686 * complete _before_ this write occurs.
1687 *
1688 * If we are near ENOSPC, write synchronously.
1689 */
1690static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1691{
1692 struct file *file = iocb->ki_filp;
1693 struct ceph_file_info *fi = file->private_data;
1694 struct inode *inode = file_inode(file);
1695 struct ceph_inode_info *ci = ceph_inode(inode);
1696 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1697 struct ceph_osd_client *osdc = &fsc->client->osdc;
1698 struct ceph_cap_flush *prealloc_cf;
1699 ssize_t count, written = 0;
1700 int err, want, got;
1701 bool direct_lock = false;
1702 u32 map_flags;
1703 u64 pool_flags;
1704 loff_t pos;
1705 loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1706
1707 if (ceph_snap(inode) != CEPH_NOSNAP)
1708 return -EROFS;
1709
1710 prealloc_cf = ceph_alloc_cap_flush();
1711 if (!prealloc_cf)
1712 return -ENOMEM;
1713
1714 if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1715 direct_lock = true;
1716
1717retry_snap:
1718 if (direct_lock)
1719 ceph_start_io_direct(inode);
1720 else
1721 ceph_start_io_write(inode);
1722
1723 /* We can write back this queue in page reclaim */
1724 current->backing_dev_info = inode_to_bdi(inode);
1725
1726 if (iocb->ki_flags & IOCB_APPEND) {
1727 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1728 if (err < 0)
1729 goto out;
1730 }
1731
1732 err = generic_write_checks(iocb, from);
1733 if (err <= 0)
1734 goto out;
1735
1736 pos = iocb->ki_pos;
1737 if (unlikely(pos >= limit)) {
1738 err = -EFBIG;
1739 goto out;
1740 } else {
1741 iov_iter_truncate(from, limit - pos);
1742 }
1743
1744 count = iov_iter_count(from);
1745 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1746 err = -EDQUOT;
1747 goto out;
1748 }
1749
1750 err = file_remove_privs(file);
1751 if (err)
1752 goto out;
1753
1754 err = file_update_time(file);
1755 if (err)
1756 goto out;
1757
1758 inode_inc_iversion_raw(inode);
1759
1760 if (ci->i_inline_version != CEPH_INLINE_NONE) {
1761 err = ceph_uninline_data(file, NULL);
1762 if (err < 0)
1763 goto out;
1764 }
1765
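	/* Fail fast with ENOSPC if the osdmap or the data pool is full. */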
1766 down_read(&osdc->lock);
1767 map_flags = osdc->osdmap->flags;
1768 pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1769 up_read(&osdc->lock);
1770 if ((map_flags & CEPH_OSDMAP_FULL) ||
1771 (pool_flags & CEPH_POOL_FLAG_FULL)) {
1772 err = -ENOSPC;
1773 goto out;
1774 }
1775
1776 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1777 inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1778 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1779 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1780 else
1781 want = CEPH_CAP_FILE_BUFFER;
1782 got = 0;
1783 err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
1784 &got, NULL);
1785 if (err < 0)
1786 goto out;
1787
1788 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1789 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1790
1791 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1792 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1793 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1794 struct ceph_snap_context *snapc;
1795 struct iov_iter data;
1796
1797 spin_lock(&ci->i_ceph_lock);
1798 if (__ceph_have_pending_cap_snap(ci)) {
1799 struct ceph_cap_snap *capsnap =
1800 list_last_entry(&ci->i_cap_snaps,
1801 struct ceph_cap_snap,
1802 ci_item);
1803 snapc = ceph_get_snap_context(capsnap->context);
1804 } else {
1805 BUG_ON(!ci->i_head_snapc);
1806 snapc = ceph_get_snap_context(ci->i_head_snapc);
1807 }
1808 spin_unlock(&ci->i_ceph_lock);
1809
1810 /* we might need to revert back to that point */
1811 data = *from;
1812 if (iocb->ki_flags & IOCB_DIRECT)
1813 written = ceph_direct_read_write(iocb, &data, snapc,
1814 &prealloc_cf);
1815 else
1816 written = ceph_sync_write(iocb, &data, pos, snapc);
1817 if (direct_lock)
1818 ceph_end_io_direct(inode);
1819 else
1820 ceph_end_io_write(inode);
1821 if (written > 0)
1822 iov_iter_advance(from, written);
1823 ceph_put_snap_context(snapc);
1824 } else {
1825 /*
1826		 * No need to acquire the i_truncate_mutex: the MDS
1827		 * revokes Fwb caps before sending us a truncate
1828		 * message, and we can't hold the Fwb cap while a
1829		 * vmtruncate is pending, so write and vmtruncate
1830		 * cannot run at the same time.
1831 */
1832 written = generic_perform_write(file, from, pos);
1833 if (likely(written >= 0))
1834 iocb->ki_pos = pos + written;
1835 ceph_end_io_write(inode);
1836 }
1837
1838 if (written >= 0) {
1839 int dirty;
1840
1841 spin_lock(&ci->i_ceph_lock);
1842 ci->i_inline_version = CEPH_INLINE_NONE;
1843 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1844 &prealloc_cf);
1845 spin_unlock(&ci->i_ceph_lock);
1846 if (dirty)
1847 __mark_inode_dirty(inode, dirty);
1848 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1849 ceph_check_caps(ci, 0, NULL);
1850 }
1851
1852 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1853 inode, ceph_vinop(inode), pos, (unsigned)count,
1854 ceph_cap_string(got));
1855 ceph_put_cap_refs(ci, got);
1856
1857 if (written == -EOLDSNAPC) {
1858		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1859 inode, ceph_vinop(inode), pos, (unsigned)count);
1860 goto retry_snap;
1861 }
1862
1863 if (written >= 0) {
1864 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1865 (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1866 iocb->ki_flags |= IOCB_DSYNC;
1867 written = generic_write_sync(iocb, written);
1868 }
1869
1870 goto out_unlocked;
1871out:
1872 if (direct_lock)
1873 ceph_end_io_direct(inode);
1874 else
1875 ceph_end_io_write(inode);
1876out_unlocked:
1877 ceph_free_cap_flush(prealloc_cf);
1878 current->backing_dev_info = NULL;
1879 return written ? written : err;
1880}
1881
1882/*
1883 * llseek. be sure to verify file size on SEEK_END.
1884 */
1885static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1886{
1887 struct inode *inode = file->f_mapping->host;
1888 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1889 loff_t i_size;
1890 loff_t ret;
1891
1892 inode_lock(inode);
1893
1894 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1895 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1896 if (ret < 0)
1897 goto out;
1898 }
1899
1900 i_size = i_size_read(inode);
1901 switch (whence) {
1902 case SEEK_END:
1903 offset += i_size;
1904 break;
1905 case SEEK_CUR:
1906 /*
1907 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1908 * position-querying operation. Avoid rewriting the "same"
1909 * f_pos value back to the file because a concurrent read(),
1910		 * write() or lseek() might have altered it.
1911 */
1912 if (offset == 0) {
1913 ret = file->f_pos;
1914 goto out;
1915 }
1916 offset += file->f_pos;
1917 break;
1918 case SEEK_DATA:
1919 if (offset < 0 || offset >= i_size) {
1920 ret = -ENXIO;
1921 goto out;
1922 }
1923 break;
1924 case SEEK_HOLE:
1925 if (offset < 0 || offset >= i_size) {
1926 ret = -ENXIO;
1927 goto out;
1928 }
1929 offset = i_size;
1930 break;
1931 }
1932
1933 ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1934
1935out:
1936 inode_unlock(inode);
1937 return ret;
1938}
1939
1940static inline void ceph_zero_partial_page(
1941 struct inode *inode, loff_t offset, unsigned size)
1942{
1943 struct page *page;
1944 pgoff_t index = offset >> PAGE_SHIFT;
1945
1946 page = find_lock_page(inode->i_mapping, index);
1947 if (page) {
1948 wait_on_page_writeback(page);
1949 zero_user(page, offset & (PAGE_SIZE - 1), size);
1950 unlock_page(page);
1951 put_page(page);
1952 }
1953}
1954
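/*
 * Zero a range of the page cache: partial pages at either end are
 * zeroed in place, whole pages in between are simply dropped.
 */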
1955static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1956 loff_t length)
1957{
1958 loff_t nearly = round_up(offset, PAGE_SIZE);
1959 if (offset < nearly) {
1960 loff_t size = nearly - offset;
1961 if (length < size)
1962 size = length;
1963 ceph_zero_partial_page(inode, offset, size);
1964 offset += size;
1965 length -= size;
1966 }
1967 if (length >= PAGE_SIZE) {
1968 loff_t size = round_down(length, PAGE_SIZE);
1969 truncate_pagecache_range(inode, offset, offset + size - 1);
1970 offset += size;
1971 length -= size;
1972 }
1973 if (length)
1974 ceph_zero_partial_page(inode, offset, length);
1975}
1976
1977static int ceph_zero_partial_object(struct inode *inode,
1978 loff_t offset, loff_t *length)
1979{
1980 struct ceph_inode_info *ci = ceph_inode(inode);
1981 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1982 struct ceph_osd_request *req;
1983 int ret = 0;
1984 loff_t zero = 0;
1985 int op;
1986
1987 if (!length) {
1988 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1989 length = &zero;
1990 } else {
1991 op = CEPH_OSD_OP_ZERO;
1992 }
1993
1994 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1995 ceph_vino(inode),
1996 offset, length,
1997 0, 1, op,
1998 CEPH_OSD_FLAG_WRITE,
1999 NULL, 0, 0, false);
2000 if (IS_ERR(req)) {
2001 ret = PTR_ERR(req);
2002 goto out;
2003 }
2004
2005 req->r_mtime = inode->i_mtime;
2006 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
2007 if (!ret) {
2008 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2009 if (ret == -ENOENT)
2010 ret = 0;
2011 }
2012 ceph_osdc_put_request(req);
2013
2014out:
2015 return ret;
2016}
2017
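/*
 * Zero a file range at the RADOS level: partial head/tail extents use
 * CEPH_OSD_OP_ZERO, while whole stripe periods delete (or truncate)
 * each object in the set outright.
 */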
2018static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2019{
2020 int ret = 0;
2021 struct ceph_inode_info *ci = ceph_inode(inode);
2022 s32 stripe_unit = ci->i_layout.stripe_unit;
2023 s32 stripe_count = ci->i_layout.stripe_count;
2024 s32 object_size = ci->i_layout.object_size;
2025 u64 object_set_size = object_size * stripe_count;
2026 u64 nearly, t;
2027
2028 /* round offset up to next period boundary */
2029 nearly = offset + object_set_size - 1;
2030 t = nearly;
2031 nearly -= do_div(t, object_set_size);
2032
2033 while (length && offset < nearly) {
2034 loff_t size = length;
2035 ret = ceph_zero_partial_object(inode, offset, &size);
2036 if (ret < 0)
2037 return ret;
2038 offset += size;
2039 length -= size;
2040 }
2041 while (length >= object_set_size) {
2042 int i;
2043 loff_t pos = offset;
2044 for (i = 0; i < stripe_count; ++i) {
2045 ret = ceph_zero_partial_object(inode, pos, NULL);
2046 if (ret < 0)
2047 return ret;
2048 pos += stripe_unit;
2049 }
2050 offset += object_set_size;
2051 length -= object_set_size;
2052 }
2053 while (length) {
2054 loff_t size = length;
2055 ret = ceph_zero_partial_object(inode, offset, &size);
2056 if (ret < 0)
2057 return ret;
2058 offset += size;
2059 length -= size;
2060 }
2061 return ret;
2062}
2063
2064static long ceph_fallocate(struct file *file, int mode,
2065 loff_t offset, loff_t length)
2066{
2067 struct ceph_file_info *fi = file->private_data;
2068 struct inode *inode = file_inode(file);
2069 struct ceph_inode_info *ci = ceph_inode(inode);
2070 struct ceph_cap_flush *prealloc_cf;
2071 int want, got = 0;
2072 int dirty;
2073 int ret = 0;
2074 loff_t endoff = 0;
2075 loff_t size;
2076
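	/* Only hole punching (with KEEP_SIZE) is supported, on regular files. */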
2077 if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2078 return -EOPNOTSUPP;
2079
2080 if (!S_ISREG(inode->i_mode))
2081 return -EOPNOTSUPP;
2082
2083 prealloc_cf = ceph_alloc_cap_flush();
2084 if (!prealloc_cf)
2085 return -ENOMEM;
2086
2087 inode_lock(inode);
2088
2089 if (ceph_snap(inode) != CEPH_NOSNAP) {
2090 ret = -EROFS;
2091 goto unlock;
2092 }
2093
2094 if (ci->i_inline_version != CEPH_INLINE_NONE) {
2095 ret = ceph_uninline_data(file, NULL);
2096 if (ret < 0)
2097 goto unlock;
2098 }
2099
2100 size = i_size_read(inode);
2101
2102 /* Are we punching a hole beyond EOF? */
2103 if (offset >= size)
2104 goto unlock;
2105 if ((offset + length) > size)
2106 length = size - offset;
2107
2108 if (fi->fmode & CEPH_FILE_MODE_LAZY)
2109 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2110 else
2111 want = CEPH_CAP_FILE_BUFFER;
2112
2113 ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
2114 if (ret < 0)
2115 goto unlock;
2116
2117 ceph_zero_pagecache_range(inode, offset, length);
2118 ret = ceph_zero_objects(inode, offset, length);
2119
2120 if (!ret) {
2121 spin_lock(&ci->i_ceph_lock);
2122 ci->i_inline_version = CEPH_INLINE_NONE;
2123 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2124 &prealloc_cf);
2125 spin_unlock(&ci->i_ceph_lock);
2126 if (dirty)
2127 __mark_inode_dirty(inode, dirty);
2128 }
2129
2130 ceph_put_cap_refs(ci, got);
2131unlock:
2132 inode_unlock(inode);
2133 ceph_free_cap_flush(prealloc_cf);
2134 return ret;
2135}
2136
2137/*
2138 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2139 * src_ci. Two attempts are made to obtain both caps, and an error is
2140 * returned if this fails; a non-negative value is returned on success.
2141 */
2142static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2143 struct file *dst_filp,
2144 loff_t dst_endoff, int *dst_got)
2145{
2146 int ret = 0;
2147 bool retrying = false;
2148
2149retry_caps:
2150 ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2151 dst_endoff, dst_got, NULL);
2152 if (ret < 0)
2153 return ret;
2154
2155 /*
2156 * Since we're already holding the FILE_WR capability for the dst file,
2157 * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
2158 * retry dance instead to try to get both capabilities.
2159 */
2160 ret = ceph_try_get_caps(file_inode(src_filp),
2161 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2162 false, src_got);
2163 if (ret <= 0) {
2164 /* Start by dropping dst_ci caps and getting src_ci caps */
2165 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2166 if (retrying) {
2167 if (!ret)
2168 /* ceph_try_get_caps masks EAGAIN */
2169 ret = -EAGAIN;
2170 return ret;
2171 }
2172 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2173 CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
2174 if (ret < 0)
2175 return ret;
2176 /*... drop src_ci caps too, and retry */
2177 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2178 retrying = true;
2179 goto retry_caps;
2180 }
2181 return ret;
2182}
2183
2184static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2185 struct ceph_inode_info *dst_ci, int dst_got)
2186{
2187 ceph_put_cap_refs(src_ci, src_got);
2188 ceph_put_cap_refs(dst_ci, dst_got);
2189}
2190
2191/*
2192 * This function does several size-related checks, returning an error if:
2193 * - source file is smaller than off+len
2194 * - destination file size is not OK (inode_newsize_ok())
2195 *  - max bytes quota is exceeded
2196 */
2197static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2198 loff_t src_off, loff_t dst_off, size_t len)
2199{
2200 loff_t size, endoff;
2201
2202 size = i_size_read(src_inode);
2203 /*
2204 * Don't copy beyond source file EOF. Instead of simply setting length
2205 * to (size - src_off), just drop to VFS default implementation, as the
2206 * local i_size may be stale due to other clients writing to the source
2207 * inode.
2208 */
2209 if (src_off + len > size) {
2210 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2211 src_off, len, size);
2212 return -EOPNOTSUPP;
2213 }
2214 size = i_size_read(dst_inode);
2215
2216 endoff = dst_off + len;
2217 if (inode_newsize_ok(dst_inode, endoff))
2218 return -EOPNOTSUPP;
2219
2220 if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2221 return -EDQUOT;
2222
2223 return 0;
2224}
2225
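/*
 * Copy len bytes object-by-object with the OSD copy-from2 operation.
 * Stops on the first failure; returns the bytes copied so far, or the
 * error itself if nothing was copied.
 */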
2226static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2227 struct ceph_inode_info *dst_ci, u64 *dst_off,
2228 struct ceph_fs_client *fsc,
2229 size_t len, unsigned int flags)
2230{
2231 struct ceph_object_locator src_oloc, dst_oloc;
2232 struct ceph_object_id src_oid, dst_oid;
2233 size_t bytes = 0;
2234 u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2235 u32 src_objlen, dst_objlen;
2236 u32 object_size = src_ci->i_layout.object_size;
2237 int ret;
2238
2239 src_oloc.pool = src_ci->i_layout.pool_id;
2240 src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2241 dst_oloc.pool = dst_ci->i_layout.pool_id;
2242 dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2243
2244 while (len >= object_size) {
2245 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2246 object_size, &src_objnum,
2247 &src_objoff, &src_objlen);
2248 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2249 object_size, &dst_objnum,
2250 &dst_objoff, &dst_objlen);
2251 ceph_oid_init(&src_oid);
2252 ceph_oid_printf(&src_oid, "%llx.%08llx",
2253 src_ci->i_vino.ino, src_objnum);
2254 ceph_oid_init(&dst_oid);
2255 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2256 dst_ci->i_vino.ino, dst_objnum);
2257 /* Do an object remote copy */
2258 ret = ceph_osdc_copy_from(&fsc->client->osdc,
2259 src_ci->i_vino.snap, 0,
2260 &src_oid, &src_oloc,
2261 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2262 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2263 &dst_oid, &dst_oloc,
2264 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2265 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
2266 dst_ci->i_truncate_seq,
2267 dst_ci->i_truncate_size,
2268 CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2269 if (ret) {
2270 if (ret == -EOPNOTSUPP) {
2271 fsc->have_copy_from2 = false;
2272 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2273 }
2274 dout("ceph_osdc_copy_from returned %d\n", ret);
2275 if (!bytes)
2276 bytes = ret;
2277 goto out;
2278 }
2279 len -= object_size;
2280 bytes += object_size;
2281 *src_off += object_size;
2282 *dst_off += object_size;
2283 }
2284
2285out:
2286 ceph_oloc_destroy(&src_oloc);
2287 ceph_oloc_destroy(&dst_oloc);
2288 return bytes;
2289}
2290
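/*
 * copy_file_range implementation: kicks off object-level remote copies
 * on the OSDs where possible, and uses do_splice_direct() for the
 * unaligned head and tail of the range.
 */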
2291static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2292 struct file *dst_file, loff_t dst_off,
2293 size_t len, unsigned int flags)
2294{
2295 struct inode *src_inode = file_inode(src_file);
2296 struct inode *dst_inode = file_inode(dst_file);
2297 struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2298 struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2299 struct ceph_cap_flush *prealloc_cf;
2300 struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2301 loff_t size;
2302 ssize_t ret = -EIO, bytes;
2303 u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2304 u32 src_objlen, dst_objlen;
2305 int src_got = 0, dst_got = 0, err, dirty;
2306
2307 if (src_inode->i_sb != dst_inode->i_sb) {
2308 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2309
2310 if (ceph_fsid_compare(&src_fsc->client->fsid,
2311 &dst_fsc->client->fsid)) {
2312 dout("Copying files across clusters: src: %pU dst: %pU\n",
2313 &src_fsc->client->fsid, &dst_fsc->client->fsid);
2314 return -EXDEV;
2315 }
2316 }
2317 if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2318 return -EROFS;
2319
2320 /*
2321 * Some of the checks below will return -EOPNOTSUPP, which will force a
2322 * fallback to the default VFS copy_file_range implementation. This is
2323 * desirable in several cases (for example, when 'len' is smaller
2324 * than the object size, or in cases where the generic copy would
2325 * be more efficient).
2326 */
2327
2328 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2329 return -EOPNOTSUPP;
2330
2331 if (!src_fsc->have_copy_from2)
2332 return -EOPNOTSUPP;
2333
2334 /*
2335 * Striped file layouts require that we copy partial objects, but the
2336 * OSD copy-from operation only supports full-object copies. Limit
2337 * this to non-striped file layouts for now.
2338 */
2339 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2340 (src_ci->i_layout.stripe_count != 1) ||
2341 (dst_ci->i_layout.stripe_count != 1) ||
2342 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2343 dout("Invalid src/dst files layout\n");
2344 return -EOPNOTSUPP;
2345 }
2346
2347 if (len < src_ci->i_layout.object_size)
2348 return -EOPNOTSUPP; /* no remote copy will be done */
2349
2350 prealloc_cf = ceph_alloc_cap_flush();
2351 if (!prealloc_cf)
2352 return -ENOMEM;
2353
2354 /* Start by sync'ing the source and destination files */
2355 ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2356 if (ret < 0) {
2357 dout("failed to write src file (%zd)\n", ret);
2358 goto out;
2359 }
2360 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2361 if (ret < 0) {
2362 dout("failed to write dst file (%zd)\n", ret);
2363 goto out;
2364 }
2365
2366 /*
2367 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2368 * clients may have dirty data in their caches. And OSDs know nothing
2369 * about caps, so they can't safely do the remote object copies.
2370 */
2371 err = get_rd_wr_caps(src_file, &src_got,
2372 dst_file, (dst_off + len), &dst_got);
2373 if (err < 0) {
2374 dout("get_rd_wr_caps returned %d\n", err);
2375 ret = -EOPNOTSUPP;
2376 goto out;
2377 }
2378
2379 ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2380 if (ret < 0)
2381 goto out_caps;
2382
2383 /* Drop dst file cached pages */
2384 ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2385 dst_off >> PAGE_SHIFT,
2386 (dst_off + len) >> PAGE_SHIFT);
2387 if (ret < 0) {
2388 dout("Failed to invalidate inode pages (%zd)\n", ret);
2389 ret = 0; /* XXX */
2390 }
2391 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2392 src_ci->i_layout.object_size,
2393 &src_objnum, &src_objoff, &src_objlen);
2394 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2395 dst_ci->i_layout.object_size,
2396 &dst_objnum, &dst_objoff, &dst_objlen);
2397	/* object-level offsets need to be the same */
2398 if (src_objoff != dst_objoff) {
2399 ret = -EOPNOTSUPP;
2400 goto out_caps;
2401 }
2402
2403 /*
2404 * Do a manual copy if the object offset isn't object aligned.
2405 * 'src_objlen' contains the bytes left until the end of the object,
2406	 * starting at src_off.
2407 */
2408 if (src_objoff) {
2409 dout("Initial partial copy of %u bytes\n", src_objlen);
2410
2411 /*
2412 * we need to temporarily drop all caps as we'll be calling
2413 * {read,write}_iter, which will get caps again.
2414 */
2415 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2416 ret = do_splice_direct(src_file, &src_off, dst_file,
2417 &dst_off, src_objlen, flags);
2418 /* Abort on short copies or on error */
2419 if (ret < src_objlen) {
2420 dout("Failed partial copy (%zd)\n", ret);
2421 goto out;
2422 }
2423 len -= ret;
2424 err = get_rd_wr_caps(src_file, &src_got,
2425 dst_file, (dst_off + len), &dst_got);
2426 if (err < 0)
2427 goto out;
2428 err = is_file_size_ok(src_inode, dst_inode,
2429 src_off, dst_off, len);
2430 if (err < 0)
2431 goto out_caps;
2432 }
2433
2434 size = i_size_read(dst_inode);
2435 bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2436 src_fsc, len, flags);
2437 if (bytes <= 0) {
2438 if (!ret)
2439 ret = bytes;
2440 goto out_caps;
2441 }
2442 dout("Copied %zu bytes out of %zu\n", bytes, len);
2443 len -= bytes;
2444 ret += bytes;
2445
2446 file_update_time(dst_file);
2447 inode_inc_iversion_raw(dst_inode);
2448
2449 if (dst_off > size) {
2450 /* Let the MDS know about dst file size change */
2451 if (ceph_inode_set_size(dst_inode, dst_off) ||
2452 ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2453 ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2454 }
2455 /* Mark Fw dirty */
2456 spin_lock(&dst_ci->i_ceph_lock);
2457 dst_ci->i_inline_version = CEPH_INLINE_NONE;
2458 dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2459 spin_unlock(&dst_ci->i_ceph_lock);
2460 if (dirty)
2461 __mark_inode_dirty(dst_inode, dirty);
2462
2463out_caps:
2464 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2465
2466 /*
2467 * Do the final manual copy if we still have some bytes left, unless
2468 * there were errors in remote object copies (len >= object_size).
2469 */
2470 if (len && (len < src_ci->i_layout.object_size)) {
2471 dout("Final partial copy of %zu bytes\n", len);
2472 bytes = do_splice_direct(src_file, &src_off, dst_file,
2473 &dst_off, len, flags);
2474 if (bytes > 0)
2475 ret += bytes;
2476 else
2477 dout("Failed partial copy (%zd)\n", bytes);
2478 }
2479
2480out:
2481 ceph_free_cap_flush(prealloc_cf);
2482
2483 return ret;
2484}
2485
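/*
 * copy_file_range entry point: try the object-level remote copy first
 * and fall back to the generic (read/write) implementation when it
 * isn't applicable.
 */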
2486static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2487 struct file *dst_file, loff_t dst_off,
2488 size_t len, unsigned int flags)
2489{
2490 ssize_t ret;
2491
2492 ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2493 len, flags);
2494
2495 if (ret == -EOPNOTSUPP || ret == -EXDEV)
2496 ret = generic_copy_file_range(src_file, src_off, dst_file,
2497 dst_off, len, flags);
2498 return ret;
2499}
2500
2501const struct file_operations ceph_file_fops = {
2502 .open = ceph_open,
2503 .release = ceph_release,
2504 .llseek = ceph_llseek,
2505 .read_iter = ceph_read_iter,
2506 .write_iter = ceph_write_iter,
2507 .mmap = ceph_mmap,
2508 .fsync = ceph_fsync,
2509 .lock = ceph_lock,
2510 .setlease = simple_nosetlease,
2511 .flock = ceph_flock,
2512 .splice_read = generic_file_splice_read,
2513 .splice_write = iter_file_splice_write,
2514 .unlocked_ioctl = ceph_ioctl,
2515 .compat_ioctl = compat_ptr_ioctl,
2516 .fallocate = ceph_fallocate,
2517 .copy_file_range = ceph_copy_file_range,
2518};