v3.1
  1#include <linux/ceph/ceph_debug.h>
  2
  3#include <linux/module.h>
  4#include <linux/sched.h>
  5#include <linux/slab.h>
  6#include <linux/file.h>
  7#include <linux/namei.h>
  8#include <linux/writeback.h>
  9
 10#include "super.h"
 11#include "mds_client.h"
 12
 13/*
 14 * Ceph file operations
 15 *
 16 * Implement basic open/close functionality, and implement
 17 * read/write.
 18 *
 19 * We implement three modes of file I/O:
 20 *  - buffered uses the generic_file_aio_{read,write} helpers
 21 *
 22 *  - synchronous is used when there is multi-client read/write
 23 *    sharing, avoids the page cache, and synchronously waits for an
 24 *    ack from the OSD.
 25 *
 26 *  - direct io takes the variant of the sync path that references
 27 *    user pages directly.
 28 *
 29 * fsync() flushes and waits on dirty pages, but just queues metadata
 30 * for writeback: since the MDS can recover size and mtime there is no
 31 * need to wait for MDS acknowledgement.
 32 */
 33
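For orientation, a condensed sketch of how the read path later in this file (ceph_aio_read) chooses among these modes. The helper name is hypothetical; the predicate mirrors the code below.

/*
 * Hypothetical helper, illustrative only: a read takes the synchronous
 * path when we lack the FILE_CACHE capability (another client may be
 * writing) or when O_DIRECT/sync semantics were requested; otherwise
 * the generic buffered path is used.
 */
static bool ceph_use_sync_read(int got, struct file *filp,
			       struct ceph_file_info *fi)
{
	return (got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	       (filp->f_flags & O_DIRECT) ||
	       (filp->f_dentry->d_inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	       (fi->flags & CEPH_F_SYNC);
}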
 34
 35/*
 36 * Prepare an open request.  Preallocate ceph_cap to avoid an
 37 * inopportune ENOMEM later.
 38 */
 39static struct ceph_mds_request *
 40prepare_open_request(struct super_block *sb, int flags, int create_mode)
 41{
 42	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 43	struct ceph_mds_client *mdsc = fsc->mdsc;
 44	struct ceph_mds_request *req;
 45	int want_auth = USE_ANY_MDS;
 46	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 47
 48	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 49		want_auth = USE_AUTH_MDS;
 50
 51	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 52	if (IS_ERR(req))
 53		goto out;
 54	req->r_fmode = ceph_flags_to_mode(flags);
 55	req->r_args.open.flags = cpu_to_le32(flags);
 56	req->r_args.open.mode = cpu_to_le32(create_mode);
 57	req->r_args.open.preferred = cpu_to_le32(-1);
 58out:
 59	return req;
 60}
 61
 62/*
 63 * initialize private struct file data.
 64 * if we fail, clean up by dropping fmode reference on the ceph_inode
 65 */
 66static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 67{
 68	struct ceph_file_info *cf;
 69	int ret = 0;
 70
 71	switch (inode->i_mode & S_IFMT) {
 72	case S_IFREG:
 73	case S_IFDIR:
 74		dout("init_file %p %p 0%o (regular)\n", inode, file,
 75		     inode->i_mode);
 76		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
 77		if (cf == NULL) {
 78			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 79			return -ENOMEM;
 80		}
 81		cf->fmode = fmode;
 82		cf->next_offset = 2;
 83		file->private_data = cf;
 84		BUG_ON(inode->i_fop->release != ceph_release);
 85		break;
 86
 87	case S_IFLNK:
 88		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 89		     inode->i_mode);
 90		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 91		break;
 92
 93	default:
 94		dout("init_file %p %p 0%o (special)\n", inode, file,
 95		     inode->i_mode);
 96		/*
 97		 * we need to drop the open ref now, since we don't
 98		 * have .release set to ceph_release.
 99		 */
100		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
101		BUG_ON(inode->i_fop->release == ceph_release);
102
103		/* call the proper open fop */
104		ret = inode->i_fop->open(inode, file);
105	}
106	return ret;
107}
108
109/*
110 * If the filp already has private_data, that means the file was
111 * already opened by intent during lookup, and we do nothing.
112 *
113 * If we already have the requisite capabilities, we can satisfy
114 * the open request locally (no need to request new caps from the
115 * MDS).  We do, however, need to inform the MDS (asynchronously)
116 * if our wanted caps set expands.
117 */
118int ceph_open(struct inode *inode, struct file *file)
119{
120	struct ceph_inode_info *ci = ceph_inode(inode);
121	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
122	struct ceph_mds_client *mdsc = fsc->mdsc;
123	struct ceph_mds_request *req;
124	struct ceph_file_info *cf = file->private_data;
125	struct inode *parent_inode = NULL;
126	int err;
127	int flags, fmode, wanted;
128
129	if (cf) {
130		dout("open file %p is already opened\n", file);
131		return 0;
132	}
133
134	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
135	flags = file->f_flags & ~(O_CREAT|O_EXCL);
136	if (S_ISDIR(inode->i_mode))
137		flags = O_DIRECTORY;  /* mds likes to know */
138
139	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
140	     ceph_vinop(inode), file, flags, file->f_flags);
141	fmode = ceph_flags_to_mode(flags);
142	wanted = ceph_caps_for_mode(fmode);
143
144	/* snapped files are read-only */
145	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
146		return -EROFS;
147
148	/* trivially open snapdir */
149	if (ceph_snap(inode) == CEPH_SNAPDIR) {
150		spin_lock(&inode->i_lock);
151		__ceph_get_fmode(ci, fmode);
152		spin_unlock(&inode->i_lock);
153		return ceph_init_file(inode, file, fmode);
154	}
155
156	/*
157	 * No need to block if we have caps on the auth MDS (for
158	 * write) or any MDS (for read).  Update wanted set
159	 * asynchronously.
160	 */
161	spin_lock(&inode->i_lock);
162	if (__ceph_is_any_real_caps(ci) &&
163	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
164		int mds_wanted = __ceph_caps_mds_wanted(ci);
165		int issued = __ceph_caps_issued(ci, NULL);
166
167		dout("open %p fmode %d want %s issued %s using existing\n",
168		     inode, fmode, ceph_cap_string(wanted),
169		     ceph_cap_string(issued));
170		__ceph_get_fmode(ci, fmode);
171		spin_unlock(&inode->i_lock);
172
173		/* adjust wanted? */
174		if ((issued & wanted) != wanted &&
175		    (mds_wanted & wanted) != wanted &&
176		    ceph_snap(inode) != CEPH_SNAPDIR)
177			ceph_check_caps(ci, 0, NULL);
178
179		return ceph_init_file(inode, file, fmode);
180	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
181		   (ci->i_snap_caps & wanted) == wanted) {
182		__ceph_get_fmode(ci, fmode);
183		spin_unlock(&inode->i_lock);
184		return ceph_init_file(inode, file, fmode);
185	}
186	spin_unlock(&inode->i_lock);
187
188	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
189	req = prepare_open_request(inode->i_sb, flags, 0);
190	if (IS_ERR(req)) {
191		err = PTR_ERR(req);
192		goto out;
193	}
194	req->r_inode = inode;
195	ihold(inode);
196	req->r_num_caps = 1;
197	if (flags & (O_CREAT|O_TRUNC))
198		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
199	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
200	iput(parent_inode);
201	if (!err)
202		err = ceph_init_file(inode, file, req->r_fmode);
203	ceph_mdsc_put_request(req);
204	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
205out:
206	return err;
207}
208
209
210/*
211 * Do a lookup + open with a single request.
212 *
213 * If this succeeds, but some subsequent check in the vfs
214 * may_open() fails, the struct *file gets cleaned up (i.e.
215 * ceph_release gets called).  So fear not!
216 */
217/*
218 * flags
219 *  path_lookup_open   -> LOOKUP_OPEN
220 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
221 */
222struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
223				struct nameidata *nd, int mode,
224				int locked_dir)
225{
226	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
227	struct ceph_mds_client *mdsc = fsc->mdsc;
228	struct file *file;
229	struct ceph_mds_request *req;
230	struct dentry *ret;
231	int err;
232	int flags = nd->intent.open.flags;
233
234	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
235	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);
236
237	/* do the open */
238	req = prepare_open_request(dir->i_sb, flags, mode);
239	if (IS_ERR(req))
240		return ERR_CAST(req);
241	req->r_dentry = dget(dentry);
242	req->r_num_caps = 2;
243	if (flags & O_CREAT) {
244		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
245		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
246	}
247	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
248	err = ceph_mdsc_do_request(mdsc,
249				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
250				   req);
251	err = ceph_handle_snapdir(req, dentry, err);
252	if (err)
253		goto out;
254	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
255		err = ceph_handle_notrace_create(dir, dentry);
256	if (err)
257		goto out;
258	file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open);
259	if (IS_ERR(file))
260		err = PTR_ERR(file);
261out:
262	ret = ceph_finish_lookup(req, dentry, err);
263	ceph_mdsc_put_request(req);
264	dout("ceph_lookup_open result=%p\n", ret);
265	return ret;
266}
267
268int ceph_release(struct inode *inode, struct file *file)
269{
270	struct ceph_inode_info *ci = ceph_inode(inode);
271	struct ceph_file_info *cf = file->private_data;
272
273	dout("release inode %p file %p\n", inode, file);
274	ceph_put_fmode(ci, cf->fmode);
275	if (cf->last_readdir)
276		ceph_mdsc_put_request(cf->last_readdir);
277	kfree(cf->last_name);
278	kfree(cf->dir_info);
279	dput(cf->dentry);
280	kmem_cache_free(ceph_file_cachep, cf);
281
282	/* wake up anyone waiting for caps on this inode */
283	wake_up_all(&ci->i_cap_wq);
284	return 0;
285}
286
287/*
288 * Read a range of bytes striped over one or more objects.  Iterate over
289 * objects we stripe over.  (That's not atomic, but good enough for now.)
290 *
291 * If we get a short result from the OSD, check against i_size; we need to
292 * only return a short read to the caller if we hit EOF.
293 */
294static int striped_read(struct inode *inode,
295			u64 off, u64 len,
296			struct page **pages, int num_pages,
297			int *checkeof, bool o_direct,
298			unsigned long buf_align)
299{
300	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
301	struct ceph_inode_info *ci = ceph_inode(inode);
302	u64 pos, this_len;
303	int io_align, page_align;
304	int left, pages_left;
305	int read;
306	struct page **page_pos;
307	int ret;
308	bool hit_stripe, was_short;
309
310	/*
311	 * we may need to do multiple reads.  not atomic, unfortunately.
312	 */
313	pos = off;
314	left = len;
315	page_pos = pages;
316	pages_left = num_pages;
317	read = 0;
318	io_align = off & ~PAGE_MASK;
319
320more:
321	if (o_direct)
322		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
323	else
324		page_align = pos & ~PAGE_MASK;
325	this_len = left;
326	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
327				  &ci->i_layout, pos, &this_len,
328				  ci->i_truncate_seq,
329				  ci->i_truncate_size,
330				  page_pos, pages_left, page_align);
331	if (ret == -ENOENT)
332		ret = 0;
333	hit_stripe = this_len < left;
334	was_short = ret >= 0 && ret < this_len;
335	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
336	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
337
338	if (ret > 0) {
339		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
340
341		if (read < pos - off) {
342			dout(" zero gap %llu to %llu\n", off + read, pos);
343			ceph_zero_page_vector_range(page_align + read,
344						    pos - off - read, pages);
345		}
346		pos += ret;
347		read = pos - off;
348		left -= ret;
349		page_pos += didpages;
350		pages_left -= didpages;
351
352		/* hit stripe? */
353		if (left && hit_stripe)
354			goto more;
355	}
356
357	if (was_short) {
358		/* did we bounce off eof? */
359		if (pos + left > inode->i_size)
360			*checkeof = 1;
361
362		/* zero trailing bytes (inside i_size) */
363		if (left > 0 && pos < inode->i_size) {
364			if (pos + left > inode->i_size)
365				left = inode->i_size - pos;
366
367			dout("zero tail %d\n", left);
368			ceph_zero_page_vector_range(page_align + read, left,
369						    pages);
370			read += left;
371		}
372	}
373
374	if (ret >= 0)
375		ret = read;
376	dout("striped_read returns %d\n", ret);
377	return ret;
378}
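The stripe boundaries that trigger the "goto more" loop above come from the file layout. Below is a simplified, illustrative sketch of the offset-to-object mapping that ceph_osdc_readpages() performs (modeled on ceph_calc_file_object_mapping() in net/ceph/osdmap.c; it assumes object_size is a multiple of stripe_unit):

/*
 * Illustrative only: map a file offset to (object number, offset
 * within that object) for a layout with stripe unit su, stripe
 * count sc, and object size os.
 */
static void file_off_to_object(u64 off, u32 su, u32 sc, u32 os,
			       u64 *objno, u64 *objoff)
{
	u32 su_per_obj = os / su;	/* stripe units per object */
	u64 blockno = off / su;		/* which stripe unit overall */
	u64 stripeno = blockno / sc;	/* which row of stripe units */
	u32 stripepos = blockno % sc;	/* which object within the set */
	u64 objsetno = stripeno / su_per_obj;

	*objno = objsetno * sc + stripepos;
	*objoff = (stripeno % su_per_obj) * su + off % su;
}

A read that would cross from one stripe unit into the next object comes back with a truncated this_len, which is what sets hit_stripe above.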
379
380/*
381 * Completely synchronous read and write methods.  Direct from __user
382 * buffer to osd, or directly to user pages (if O_DIRECT).
383 *
384 * If the read spans object boundary, just do multiple reads.
385 */
386static ssize_t ceph_sync_read(struct file *file, char __user *data,
387			      unsigned len, loff_t *poff, int *checkeof)
388{
389	struct inode *inode = file->f_dentry->d_inode;
390	struct page **pages;
391	u64 off = *poff;
392	int num_pages, ret;
393
394	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
395	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
396
397	if (file->f_flags & O_DIRECT) {
398		num_pages = calc_pages_for((unsigned long)data, len);
399		pages = ceph_get_direct_page_vector(data, num_pages, true);
400	} else {
401		num_pages = calc_pages_for(off, len);
402		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
403	}
404	if (IS_ERR(pages))
405		return PTR_ERR(pages);
406
407	/*
408	 * flush any page cache pages in this range.  this
409	 * will make concurrent normal and sync io slow,
410	 * but it will at least behave sensibly when they are
411	 * in sequence.
412	 */
413	ret = filemap_write_and_wait(inode->i_mapping);
414	if (ret < 0)
415		goto done;
416
417	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
418			   file->f_flags & O_DIRECT,
419			   (unsigned long)data & ~PAGE_MASK);
420
421	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
422		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
423	if (ret >= 0)
424		*poff = off + ret;
425
426done:
427	if (file->f_flags & O_DIRECT)
428		ceph_put_page_vector(pages, num_pages, true);
429	else
430		ceph_release_page_vector(pages, num_pages);
431	dout("sync_read result %d\n", ret);
432	return ret;
433}
434
435/*
436 * Write commit callback, called if we requested both an ACK and
437 * ONDISK commit reply from the OSD.
438 */
439static void sync_write_commit(struct ceph_osd_request *req,
440			      struct ceph_msg *msg)
441{
442	struct ceph_inode_info *ci = ceph_inode(req->r_inode);
443
444	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
445	spin_lock(&ci->i_unsafe_lock);
446	list_del_init(&req->r_unsafe_item);
447	spin_unlock(&ci->i_unsafe_lock);
448	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
449}
450
451/*
452 * Synchronous write, straight from __user pointer or user pages (if
453 * O_DIRECT).
454 *
455 * If write spans object boundary, just do multiple writes.  (For a
456 * correct atomic write, we should e.g. take write locks on all
457 * objects, rollback on failure, etc.)
458 */
459static ssize_t ceph_sync_write(struct file *file, const char __user *data,
460			       size_t left, loff_t *offset)
461{
462	struct inode *inode = file->f_dentry->d_inode;
463	struct ceph_inode_info *ci = ceph_inode(inode);
464	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
465	struct ceph_osd_request *req;
466	struct page **pages;
467	int num_pages;
468	long long unsigned pos;
469	u64 len;
470	int written = 0;
471	int flags;
472	int do_sync = 0;
473	int check_caps = 0;
474	int page_align, io_align;
475	unsigned long buf_align;
476	int ret;
477	struct timespec mtime = CURRENT_TIME;
478
479	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
480		return -EROFS;
481
482	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
483	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
484
485	if (file->f_flags & O_APPEND)
486		pos = i_size_read(inode);
487	else
488		pos = *offset;
489
490	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
491	if (ret < 0)
492		return ret;
493
494	ret = invalidate_inode_pages2_range(inode->i_mapping,
495					    pos >> PAGE_CACHE_SHIFT,
496					    (pos + left) >> PAGE_CACHE_SHIFT);
497	if (ret < 0)
498		dout("invalidate_inode_pages2_range returned %d\n", ret);
499
500	flags = CEPH_OSD_FLAG_ORDERSNAP |
501		CEPH_OSD_FLAG_ONDISK |
502		CEPH_OSD_FLAG_WRITE;
503	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
504		flags |= CEPH_OSD_FLAG_ACK;
505	else
506		do_sync = 1;
507
508	/*
509	 * we may need to do multiple writes here if we span an object
510	 * boundary.  this isn't atomic, unfortunately.  :(
511	 */
512more:
513	io_align = pos & ~PAGE_MASK;
514	buf_align = (unsigned long)data & ~PAGE_MASK;
515	len = left;
516	if (file->f_flags & O_DIRECT) {
517		/* write from beginning of first page, regardless of
518		   io alignment */
519		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
520		num_pages = calc_pages_for((unsigned long)data, len);
521	} else {
522		page_align = pos & ~PAGE_MASK;
523		num_pages = calc_pages_for(pos, len);
524	}
525	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
526				    ceph_vino(inode), pos, &len,
527				    CEPH_OSD_OP_WRITE, flags,
528				    ci->i_snap_realm->cached_context,
529				    do_sync,
530				    ci->i_truncate_seq, ci->i_truncate_size,
531				    &mtime, false, 2, page_align);
532	if (!req)
533		return -ENOMEM;
534
535	if (file->f_flags & O_DIRECT) {
536		pages = ceph_get_direct_page_vector(data, num_pages, false);
537		if (IS_ERR(pages)) {
538			ret = PTR_ERR(pages);
539			goto out;
540		}
541
542		/*
543		 * throw out any page cache pages in this range. this
544		 * may block.
545		 */
546		truncate_inode_pages_range(inode->i_mapping, pos,
547					   (pos+len) | (PAGE_CACHE_SIZE-1));
548	} else {
549		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
550		if (IS_ERR(pages)) {
551			ret = PTR_ERR(pages);
552			goto out;
553		}
554		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
555		if (ret < 0) {
556			ceph_release_page_vector(pages, num_pages);
557			goto out;
558		}
559
560		if ((file->f_flags & O_SYNC) == 0) {
561			/* get a second commit callback */
562			req->r_safe_callback = sync_write_commit;
563			req->r_own_pages = 1;
564		}
565	}
566	req->r_pages = pages;
567	req->r_num_pages = num_pages;
568	req->r_inode = inode;
569
570	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
571	if (!ret) {
572		if (req->r_safe_callback) {
573			/*
574			 * Add to inode unsafe list only after we
575			 * start_request so that a tid has been assigned.
576			 */
577			spin_lock(&ci->i_unsafe_lock);
578			list_add_tail(&req->r_unsafe_item,
579				      &ci->i_unsafe_writes);
580			spin_unlock(&ci->i_unsafe_lock);
581			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
582		}
583
584		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
585		if (ret < 0 && req->r_safe_callback) {
586			spin_lock(&ci->i_unsafe_lock);
587			list_del_init(&req->r_unsafe_item);
588			spin_unlock(&ci->i_unsafe_lock);
589			ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
590		}
591	}
592
593	if (file->f_flags & O_DIRECT)
594		ceph_put_page_vector(pages, num_pages, false);
595	else if (file->f_flags & O_SYNC)
596		ceph_release_page_vector(pages, num_pages);
597
598out:
599	ceph_osdc_put_request(req);
600	if (ret == 0) {
601		pos += len;
602		written += len;
603		left -= len;
604		data += len;
605		if (left)
606			goto more;
607
608		ret = written;
609		*offset = pos;
610		if (pos > i_size_read(inode))
611			check_caps = ceph_inode_set_size(inode, pos);
612		if (check_caps)
613			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
614					NULL);
615	}
616	return ret;
617}
618
619/*
620 * Wrap generic_file_aio_read with checks for cap bits on the inode.
621 * Atomically grab references, so that those bits are not released
622 * back to the MDS mid-read.
623 *
624 * Hmm, the sync read case isn't actually async... should it be?
625 */
626static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
627			     unsigned long nr_segs, loff_t pos)
628{
629	struct file *filp = iocb->ki_filp;
630	struct ceph_file_info *fi = filp->private_data;
631	loff_t *ppos = &iocb->ki_pos;
632	size_t len = iov->iov_len;
633	struct inode *inode = filp->f_dentry->d_inode;
634	struct ceph_inode_info *ci = ceph_inode(inode);
635	void __user *base = iov->iov_base;
636	ssize_t ret;
637	int want, got = 0;
638	int checkeof = 0, read = 0;
639
640	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
641	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
642again:
643	__ceph_do_pending_vmtruncate(inode);
644	if (fi->fmode & CEPH_FILE_MODE_LAZY)
645		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
646	else
647		want = CEPH_CAP_FILE_CACHE;
648	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
649	if (ret < 0)
650		goto out;
651	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
652	     inode, ceph_vinop(inode), pos, (unsigned)len,
653	     ceph_cap_string(got));
654
655	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
656	    (iocb->ki_filp->f_flags & O_DIRECT) ||
657	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
658	    (fi->flags & CEPH_F_SYNC))
659		/* hmm, this isn't really async... */
660		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
661	else
662		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
663
664out:
665	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
666	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
667	ceph_put_cap_refs(ci, got);
668
669	if (checkeof && ret >= 0) {
670		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
671
672		/* hit EOF or hole? */
673		if (statret == 0 && *ppos < inode->i_size) {
674			dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
675			read += ret;
676			base += ret;
677			len -= ret;
678			checkeof = 0;
679			goto again;
680		}
681	}
682	if (ret >= 0)
683		ret += read;
684
685	return ret;
686}
687
688/*
689 * Take cap references to avoid releasing caps to MDS mid-write.
690 *
691 * If we are synchronous, and write with an old snap context, the OSD
 692 * may return EOLDSNAPC.  In that case, retry the write... _after_
693 * dropping our cap refs and allowing the pending snap to logically
694 * complete _before_ this write occurs.
695 *
696 * If we are near ENOSPC, write synchronously.
697 */
698static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
699		       unsigned long nr_segs, loff_t pos)
700{
701	struct file *file = iocb->ki_filp;
702	struct ceph_file_info *fi = file->private_data;
703	struct inode *inode = file->f_dentry->d_inode;
704	struct ceph_inode_info *ci = ceph_inode(inode);
705	struct ceph_osd_client *osdc =
706		&ceph_sb_to_client(inode->i_sb)->client->osdc;
707	loff_t endoff = pos + iov->iov_len;
708	int want, got = 0;
709	int ret, err;
710
711	if (ceph_snap(inode) != CEPH_NOSNAP)
712		return -EROFS;
713
714retry_snap:
715	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
716		return -ENOSPC;
717	__ceph_do_pending_vmtruncate(inode);
718	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
719	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
720	     inode->i_size);
721	if (fi->fmode & CEPH_FILE_MODE_LAZY)
722		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
723	else
724		want = CEPH_CAP_FILE_BUFFER;
725	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
726	if (ret < 0)
727		goto out_put;
728
729	dout("aio_write %p %llx.%llx %llu~%u  got cap refs on %s\n",
730	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
731	     ceph_cap_string(got));
732
733	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
734	    (iocb->ki_filp->f_flags & O_DIRECT) ||
735	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
736	    (fi->flags & CEPH_F_SYNC)) {
737		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
738			&iocb->ki_pos);
739	} else {
740		/*
741		 * buffered write; drop Fw early to avoid slow
742		 * revocation if we get stuck on balance_dirty_pages
743		 */
744		int dirty;
745
746		spin_lock(&inode->i_lock);
747		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
748		spin_unlock(&inode->i_lock);
749		ceph_put_cap_refs(ci, got);
750
751		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
752		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
753		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
754		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
755			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
756			if (err < 0)
757				ret = err;
758		}
759
760		if (dirty)
761			__mark_inode_dirty(inode, dirty);
762		goto out;
763	}
764
765	if (ret >= 0) {
766		int dirty;
767		spin_lock(&inode->i_lock);
768		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
769		spin_unlock(&inode->i_lock);
770		if (dirty)
771			__mark_inode_dirty(inode, dirty);
772	}
773
774out_put:
775	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
776	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
777	     ceph_cap_string(got));
778	ceph_put_cap_refs(ci, got);
779
780out:
781	if (ret == -EOLDSNAPC) {
782		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
783		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
784		goto retry_snap;
785	}
786
787	return ret;
788}
789
790/*
791 * llseek.  be sure to verify file size on SEEK_END.
792 */
793static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
794{
795	struct inode *inode = file->f_mapping->host;
796	int ret;
797
798	mutex_lock(&inode->i_mutex);
799	__ceph_do_pending_vmtruncate(inode);
800	if (origin != SEEK_CUR && origin != SEEK_SET) {
801		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
802		if (ret < 0) {
803			offset = ret;
804			goto out;
805		}
806	}
807
808	switch (origin) {
809	case SEEK_END:
810		offset += inode->i_size;
811		break;
812	case SEEK_CUR:
813		/*
814		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
815		 * position-querying operation.  Avoid rewriting the "same"
816		 * f_pos value back to the file because a concurrent read(),
817		 * write() or lseek() might have altered it
818		 */
819		if (offset == 0) {
820			offset = file->f_pos;
821			goto out;
822		}
823		offset += file->f_pos;
824		break;
825	case SEEK_DATA:
826		if (offset >= inode->i_size) {
827			offset = -ENXIO;
828			goto out;
829		}
830		break;
831	case SEEK_HOLE:
832		if (offset >= inode->i_size) {
833			offset = -ENXIO;
834			goto out;
835		}
836		offset = inode->i_size;
837		break;
838	}
839
840	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
841		offset = -EINVAL;
842		goto out;
843	}
844
845	/* Special lock needed here? */
846	if (offset != file->f_pos) {
847		file->f_pos = offset;
848		file->f_version = 0;
849	}
850
851out:
852	mutex_unlock(&inode->i_mutex);
853	return offset;
854}
855
856const struct file_operations ceph_file_fops = {
857	.open = ceph_open,
858	.release = ceph_release,
859	.llseek = ceph_llseek,
860	.read = do_sync_read,
861	.write = do_sync_write,
862	.aio_read = ceph_aio_read,
863	.aio_write = ceph_aio_write,
864	.mmap = ceph_mmap,
865	.fsync = ceph_fsync,
866	.lock = ceph_lock,
867	.flock = ceph_flock,
868	.splice_read = generic_file_splice_read,
869	.splice_write = generic_file_splice_write,
870	.unlocked_ioctl = ceph_ioctl,
871	.compat_ioctl	= ceph_ioctl,
872};
873
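From userspace, opening with O_DIRECT routes reads through the sync path in the listing above (ceph_sync_read), which references the caller's pages directly, so the buffer should be page-aligned. A minimal hypothetical example, with error paths simplified:

#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical example: a page-aligned O_DIRECT read. */
int read_direct(const char *path, size_t len)
{
	void *buf;
	ssize_t n;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, len)) {	/* page-aligned buffer */
		close(fd);
		return -1;
	}
	n = read(fd, buf, len);	/* served by the sync/direct path above */
	free(buf);
	close(fd);
	return n < 0 ? -1 : 0;
}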
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3#include <linux/ceph/striper.h>
   4
   5#include <linux/module.h>
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/file.h>
   9#include <linux/mount.h>
  10#include <linux/namei.h>
  11#include <linux/writeback.h>
  12#include <linux/falloc.h>
  13#include <linux/iversion.h>
  14
  15#include "super.h"
  16#include "mds_client.h"
  17#include "cache.h"
  18#include "io.h"
  19
  20static __le32 ceph_flags_sys2wire(u32 flags)
  21{
  22	u32 wire_flags = 0;
  23
  24	switch (flags & O_ACCMODE) {
  25	case O_RDONLY:
  26		wire_flags |= CEPH_O_RDONLY;
  27		break;
  28	case O_WRONLY:
  29		wire_flags |= CEPH_O_WRONLY;
  30		break;
  31	case O_RDWR:
  32		wire_flags |= CEPH_O_RDWR;
  33		break;
  34	}
  35
  36	flags &= ~O_ACCMODE;
  37
  38#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
  39
  40	ceph_sys2wire(O_CREAT);
  41	ceph_sys2wire(O_EXCL);
  42	ceph_sys2wire(O_TRUNC);
  43	ceph_sys2wire(O_DIRECTORY);
  44	ceph_sys2wire(O_NOFOLLOW);
  45
  46#undef ceph_sys2wire
  47
  48	if (flags)
  49		dout("unused open flags: %x\n", flags);
  50
  51	return cpu_to_le32(wire_flags);
  52}
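For clarity, the token-pasting macro above expands each flag check in place; ceph_sys2wire(O_CREAT), for example, becomes:

	if (flags & O_CREAT) { wire_flags |= CEPH_O_CREAT; flags &= ~O_CREAT; }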
  53
  54/*
  55 * Ceph file operations
  56 *
  57 * Implement basic open/close functionality, and implement
  58 * read/write.
  59 *
  60 * We implement three modes of file I/O:
  61 *  - buffered uses the generic_file_aio_{read,write} helpers
  62 *
  63 *  - synchronous is used when there is multi-client read/write
  64 *    sharing, avoids the page cache, and synchronously waits for an
  65 *    ack from the OSD.
  66 *
  67 *  - direct io takes the variant of the sync path that references
  68 *    user pages directly.
  69 *
  70 * fsync() flushes and waits on dirty pages, but just queues metadata
  71 * for writeback: since the MDS can recover size and mtime there is no
  72 * need to wait for MDS acknowledgement.
  73 */
  74
  75/*
  76 * How many pages to get in one call to iov_iter_get_pages().  This
  77 * determines the size of the on-stack array used as a buffer.
  78 */
  79#define ITER_GET_BVECS_PAGES	64
  80
  81static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
  82				struct bio_vec *bvecs)
  83{
  84	size_t size = 0;
  85	int bvec_idx = 0;
  86
  87	if (maxsize > iov_iter_count(iter))
  88		maxsize = iov_iter_count(iter);
  89
  90	while (size < maxsize) {
  91		struct page *pages[ITER_GET_BVECS_PAGES];
  92		ssize_t bytes;
  93		size_t start;
  94		int idx = 0;
  95
  96		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
  97					   ITER_GET_BVECS_PAGES, &start);
  98		if (bytes < 0)
  99			return size ?: bytes;
 100
 101		iov_iter_advance(iter, bytes);
 102		size += bytes;
 103
 104		for ( ; bytes; idx++, bvec_idx++) {
 105			struct bio_vec bv = {
 106				.bv_page = pages[idx],
 107				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
 108				.bv_offset = start,
 109			};
 110
 111			bvecs[bvec_idx] = bv;
 112			bytes -= bv.bv_len;
 113			start = 0;
 114		}
 115	}
 116
 117	return size;
 118}
 119
 120/*
 121 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 122 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 123 * page.
 124 *
 125 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 126 * Return the number of bytes in the created bio_vec array, or an error.
 127 */
 128static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 129				    struct bio_vec **bvecs, int *num_bvecs)
 130{
 131	struct bio_vec *bv;
 132	size_t orig_count = iov_iter_count(iter);
 133	ssize_t bytes;
 134	int npages;
 135
 136	iov_iter_truncate(iter, maxsize);
 137	npages = iov_iter_npages(iter, INT_MAX);
 138	iov_iter_reexpand(iter, orig_count);
 139
 140	/*
 141	 * __iter_get_bvecs() may populate only part of the array -- zero it
 142	 * out.
 143	 */
 144	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
 145	if (!bv)
 146		return -ENOMEM;
 147
 148	bytes = __iter_get_bvecs(iter, maxsize, bv);
 149	if (bytes < 0) {
 150		/*
 151		 * No pages were pinned -- just free the array.
 152		 */
 153		kvfree(bv);
 154		return bytes;
 155	}
 156
 157	*bvecs = bv;
 158	*num_bvecs = npages;
 159	return bytes;
 160}
 161
 162static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 163{
 164	int i;
 165
 166	for (i = 0; i < num_bvecs; i++) {
 167		if (bvecs[i].bv_page) {
 168			if (should_dirty)
 169				set_page_dirty_lock(bvecs[i].bv_page);
 170			put_page(bvecs[i].bv_page);
 171		}
 172	}
 173	kvfree(bvecs);
 174}
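iter_get_bvecs_alloc() and put_bvecs() bracket each direct-I/O request. A condensed, hypothetical sketch of the pairing as ceph_direct_read_write() below uses it (OSD request setup elided; the real code additionally checks iter_is_iovec() before deciding to dirty pages):

/* Hypothetical helper, illustrative only. */
static ssize_t pin_issue_unpin(struct iov_iter *iter, size_t size, bool write)
{
	struct bio_vec *bvecs;
	int num_bvecs;
	ssize_t len;

	/* pin up to size bytes of user pages into a bio_vec array */
	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
	if (len < 0)
		return len;	/* nothing was pinned */

	/* ... attach bvecs to an OSD request and wait for it ... */

	/* unpin; a read must mark the user pages dirty */
	put_bvecs(bvecs, num_bvecs, !write);
	return len;
}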
 175
 176/*
 177 * Prepare an open request.  Preallocate ceph_cap to avoid an
 178 * inopportune ENOMEM later.
 179 */
 180static struct ceph_mds_request *
 181prepare_open_request(struct super_block *sb, int flags, int create_mode)
 182{
 183	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 184	struct ceph_mds_client *mdsc = fsc->mdsc;
 185	struct ceph_mds_request *req;
 186	int want_auth = USE_ANY_MDS;
 187	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 188
 189	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 190		want_auth = USE_AUTH_MDS;
 191
 192	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 193	if (IS_ERR(req))
 194		goto out;
 195	req->r_fmode = ceph_flags_to_mode(flags);
 196	req->r_args.open.flags = ceph_flags_sys2wire(flags);
 197	req->r_args.open.mode = cpu_to_le32(create_mode);
 198out:
 199	return req;
 200}
 201
 202static int ceph_init_file_info(struct inode *inode, struct file *file,
 203					int fmode, bool isdir)
 204{
 205	struct ceph_inode_info *ci = ceph_inode(inode);
 206	struct ceph_file_info *fi;
 207
 208	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
 209			inode->i_mode, isdir ? "dir" : "regular");
 210	BUG_ON(inode->i_fop->release != ceph_release);
 211
 212	if (isdir) {
 213		struct ceph_dir_file_info *dfi =
 214			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 215		if (!dfi) {
 216			ceph_put_fmode(ci, fmode); /* clean up */
 217			return -ENOMEM;
 218		}
 219
 220		file->private_data = dfi;
 221		fi = &dfi->file_info;
 222		dfi->next_offset = 2;
 223		dfi->readdir_cache_idx = -1;
 224	} else {
 225		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 226		if (!fi) {
 227			ceph_put_fmode(ci, fmode); /* clean up */
 228			return -ENOMEM;
 229		}
 230
 231		file->private_data = fi;
 232	}
 233
 234	fi->fmode = fmode;
 235	spin_lock_init(&fi->rw_contexts_lock);
 236	INIT_LIST_HEAD(&fi->rw_contexts);
 237	fi->meta_err = errseq_sample(&ci->i_meta_err);
 238	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
 239
 240	return 0;
 241}
 242
 243/*
 244 * initialize private struct file data.
 245 * if we fail, clean up by dropping fmode reference on the ceph_inode
 246 */
 247static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 248{
 249	int ret = 0;
 250
 251	switch (inode->i_mode & S_IFMT) {
 252	case S_IFREG:
 253		ceph_fscache_register_inode_cookie(inode);
 254		ceph_fscache_file_set_cookie(inode, file);
 255		/* fall through */
 256	case S_IFDIR:
 257		ret = ceph_init_file_info(inode, file, fmode,
 258						S_ISDIR(inode->i_mode));
 259		if (ret)
 260			return ret;
 261		break;
 262
 263	case S_IFLNK:
 264		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 265		     inode->i_mode);
 266		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 267		break;
 268
 269	default:
 270		dout("init_file %p %p 0%o (special)\n", inode, file,
 271		     inode->i_mode);
 272		/*
 273		 * we need to drop the open ref now, since we don't
 274		 * have .release set to ceph_release.
 275		 */
 276		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 277		BUG_ON(inode->i_fop->release == ceph_release);
 278
 279		/* call the proper open fop */
 280		ret = inode->i_fop->open(inode, file);
 281	}
 282	return ret;
 283}
 284
 285/*
 286 * try renew caps after session gets killed.
 287 */
 288int ceph_renew_caps(struct inode *inode)
 289{
 290	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 291	struct ceph_inode_info *ci = ceph_inode(inode);
 292	struct ceph_mds_request *req;
 293	int err, flags, wanted;
 294
 295	spin_lock(&ci->i_ceph_lock);
 296	wanted = __ceph_caps_file_wanted(ci);
 297	if (__ceph_is_any_real_caps(ci) &&
 298	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 299		int issued = __ceph_caps_issued(ci, NULL);
 300		spin_unlock(&ci->i_ceph_lock);
 301		dout("renew caps %p want %s issued %s updating mds_wanted\n",
 302		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 303		ceph_check_caps(ci, 0, NULL);
 304		return 0;
 305	}
 306	spin_unlock(&ci->i_ceph_lock);
 307
 308	flags = 0;
 309	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 310		flags = O_RDWR;
 311	else if (wanted & CEPH_CAP_FILE_RD)
 312		flags = O_RDONLY;
 313	else if (wanted & CEPH_CAP_FILE_WR)
 314		flags = O_WRONLY;
 315#ifdef O_LAZY
 316	if (wanted & CEPH_CAP_FILE_LAZYIO)
 317		flags |= O_LAZY;
 318#endif
 319
 320	req = prepare_open_request(inode->i_sb, flags, 0);
 321	if (IS_ERR(req)) {
 322		err = PTR_ERR(req);
 323		goto out;
 324	}
 325
 326	req->r_inode = inode;
 327	ihold(inode);
 328	req->r_num_caps = 1;
 329	req->r_fmode = -1;
 330
 331	err = ceph_mdsc_do_request(mdsc, NULL, req);
 332	ceph_mdsc_put_request(req);
 333out:
 334	dout("renew caps %p open result=%d\n", inode, err);
 335	return err < 0 ? err : 0;
 336}
 337
 338/*
 339 * If we already have the requisite capabilities, we can satisfy
 340 * the open request locally (no need to request new caps from the
 341 * MDS).  We do, however, need to inform the MDS (asynchronously)
 342 * if our wanted caps set expands.
 343 */
 344int ceph_open(struct inode *inode, struct file *file)
 345{
 346	struct ceph_inode_info *ci = ceph_inode(inode);
 347	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 348	struct ceph_mds_client *mdsc = fsc->mdsc;
 349	struct ceph_mds_request *req;
 350	struct ceph_file_info *fi = file->private_data;
 351	int err;
 352	int flags, fmode, wanted;
 353
 354	if (fi) {
 355		dout("open file %p is already opened\n", file);
 356		return 0;
 357	}
 358
 359	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 360	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 361	if (S_ISDIR(inode->i_mode))
 362		flags = O_DIRECTORY;  /* mds likes to know */
 363
 364	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 365	     ceph_vinop(inode), file, flags, file->f_flags);
 366	fmode = ceph_flags_to_mode(flags);
 367	wanted = ceph_caps_for_mode(fmode);
 368
 369	/* snapped files are read-only */
 370	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 371		return -EROFS;
 372
 373	/* trivially open snapdir */
 374	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 375		spin_lock(&ci->i_ceph_lock);
 376		__ceph_get_fmode(ci, fmode);
 377		spin_unlock(&ci->i_ceph_lock);
 378		return ceph_init_file(inode, file, fmode);
 379	}
 380
 381	/*
 382	 * No need to block if we have caps on the auth MDS (for
 383	 * write) or any MDS (for read).  Update wanted set
 384	 * asynchronously.
 385	 */
 386	spin_lock(&ci->i_ceph_lock);
 387	if (__ceph_is_any_real_caps(ci) &&
 388	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 389		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
 390		int issued = __ceph_caps_issued(ci, NULL);
 391
 392		dout("open %p fmode %d want %s issued %s using existing\n",
 393		     inode, fmode, ceph_cap_string(wanted),
 394		     ceph_cap_string(issued));
 395		__ceph_get_fmode(ci, fmode);
 396		spin_unlock(&ci->i_ceph_lock);
 397
 398		/* adjust wanted? */
 399		if ((issued & wanted) != wanted &&
 400		    (mds_wanted & wanted) != wanted &&
 401		    ceph_snap(inode) != CEPH_SNAPDIR)
 402			ceph_check_caps(ci, 0, NULL);
 403
 404		return ceph_init_file(inode, file, fmode);
 405	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 406		   (ci->i_snap_caps & wanted) == wanted) {
 407		__ceph_get_fmode(ci, fmode);
 408		spin_unlock(&ci->i_ceph_lock);
 409		return ceph_init_file(inode, file, fmode);
 410	}
 411
 412	spin_unlock(&ci->i_ceph_lock);
 413
 414	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 415	req = prepare_open_request(inode->i_sb, flags, 0);
 416	if (IS_ERR(req)) {
 417		err = PTR_ERR(req);
 418		goto out;
 419	}
 420	req->r_inode = inode;
 421	ihold(inode);
 422
 423	req->r_num_caps = 1;
 424	err = ceph_mdsc_do_request(mdsc, NULL, req);
 425	if (!err)
 426		err = ceph_init_file(inode, file, req->r_fmode);
 427	ceph_mdsc_put_request(req);
 428	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 429out:
 430	return err;
 431}
 432
 433
 434/*
 435 * Do a lookup + open with a single request.  If we get a non-existent
 436 * file or symlink, return 1 so the VFS can retry.
 437 */
 438int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 439		     struct file *file, unsigned flags, umode_t mode)
 440{
 441	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 442	struct ceph_mds_client *mdsc = fsc->mdsc;
 443	struct ceph_mds_request *req;
 444	struct dentry *dn;
 445	struct ceph_acl_sec_ctx as_ctx = {};
 446	int mask;
 447	int err;
 448
 449	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 450	     dir, dentry, dentry,
 451	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 452
 453	if (dentry->d_name.len > NAME_MAX)
 454		return -ENAMETOOLONG;
 455
 456	if (flags & O_CREAT) {
 457		if (ceph_quota_is_max_files_exceeded(dir))
 458			return -EDQUOT;
 459		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
 460		if (err < 0)
 461			return err;
 462		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
 463		if (err < 0)
 464			goto out_ctx;
 465	} else if (!d_in_lookup(dentry)) {
 466		/* If it's not being looked up, it's negative */
 467		return -ENOENT;
 468	}
 469
 470	/* do the open */
 471	req = prepare_open_request(dir->i_sb, flags, mode);
 472	if (IS_ERR(req)) {
 473		err = PTR_ERR(req);
 474		goto out_ctx;
 475	}
 476	req->r_dentry = dget(dentry);
 477	req->r_num_caps = 2;
 478	if (flags & O_CREAT) {
 479		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
 480		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 481		if (as_ctx.pagelist) {
 482			req->r_pagelist = as_ctx.pagelist;
 483			as_ctx.pagelist = NULL;
 484		}
 485	}
 486
 487	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
 488	if (ceph_security_xattr_wanted(dir))
 489		mask |= CEPH_CAP_XATTR_SHARED;
 490	req->r_args.open.mask = cpu_to_le32(mask);
 491
 492	req->r_parent = dir;
 493	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 494	err = ceph_mdsc_do_request(mdsc,
 495				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 496				   req);
 497	err = ceph_handle_snapdir(req, dentry, err);
 498	if (err)
 499		goto out_req;
 500
 501	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 502		err = ceph_handle_notrace_create(dir, dentry);
 503
 504	if (d_in_lookup(dentry)) {
 505		dn = ceph_finish_lookup(req, dentry, err);
 506		if (IS_ERR(dn))
 507			err = PTR_ERR(dn);
 508	} else {
 509		/* we were given a hashed negative dentry */
 510		dn = NULL;
 511	}
 512	if (err)
 513		goto out_req;
 514	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 515		/* make vfs retry on splice, ENOENT, or symlink */
 516		dout("atomic_open finish_no_open on dn %p\n", dn);
 517		err = finish_no_open(file, dn);
 518	} else {
 519		dout("atomic_open finish_open on dn %p\n", dn);
 520		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 521			ceph_init_inode_acls(d_inode(dentry), &as_ctx);
 522			file->f_mode |= FMODE_CREATED;
 523		}
 524		err = finish_open(file, dentry, ceph_open);
 525	}
 526out_req:
 527	if (!req->r_err && req->r_target_inode)
 528		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
 529	ceph_mdsc_put_request(req);
 530out_ctx:
 531	ceph_release_acl_sec_ctx(&as_ctx);
 532	dout("atomic_open result=%d\n", err);
 533	return err;
 534}
 535
 536int ceph_release(struct inode *inode, struct file *file)
 537{
 538	struct ceph_inode_info *ci = ceph_inode(inode);
 539
 540	if (S_ISDIR(inode->i_mode)) {
 541		struct ceph_dir_file_info *dfi = file->private_data;
 542		dout("release inode %p dir file %p\n", inode, file);
 543		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 544
 545		ceph_put_fmode(ci, dfi->file_info.fmode);
 546
 547		if (dfi->last_readdir)
 548			ceph_mdsc_put_request(dfi->last_readdir);
 549		kfree(dfi->last_name);
 550		kfree(dfi->dir_info);
 551		kmem_cache_free(ceph_dir_file_cachep, dfi);
 552	} else {
 553		struct ceph_file_info *fi = file->private_data;
 554		dout("release inode %p regular file %p\n", inode, file);
 555		WARN_ON(!list_empty(&fi->rw_contexts));
 556
 557		ceph_put_fmode(ci, fi->fmode);
 558		kmem_cache_free(ceph_file_cachep, fi);
 559	}
 560
 561	/* wake up anyone waiting for caps on this inode */
 562	wake_up_all(&ci->i_cap_wq);
 563	return 0;
 564}
 565
 566enum {
 567	HAVE_RETRIED = 1,
 568	CHECK_EOF =    2,
 569	READ_INLINE =  3,
 570};
 571
 572/*
 573 * Completely synchronous read and write methods.  Direct from __user
 574 * buffer to osd, or directly to user pages (if O_DIRECT).
 575 *
 576 * If the read spans object boundary, just do multiple reads.  (That's not
 577 * atomic, but good enough for now.)
 578 *
 579 * If we get a short result from the OSD, check against i_size; we need to
 580 * only return a short read to the caller if we hit EOF.
 581 */
 582static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 583			      int *retry_op)
 584{
 585	struct file *file = iocb->ki_filp;
 586	struct inode *inode = file_inode(file);
 587	struct ceph_inode_info *ci = ceph_inode(inode);
 588	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 589	struct ceph_osd_client *osdc = &fsc->client->osdc;
 590	ssize_t ret;
 591	u64 off = iocb->ki_pos;
 592	u64 len = iov_iter_count(to);
 593
 594	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
 595	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 596
 597	if (!len)
 598		return 0;
 599	/*
 600	 * flush any page cache pages in this range.  this
 601	 * will make concurrent normal and sync io slow,
 602	 * but it will at least behave sensibly when they are
 603	 * in sequence.
 604	 */
 605	ret = filemap_write_and_wait_range(inode->i_mapping,
 606					   off, off + len - 1);
 607	if (ret < 0)
 608		return ret;
 609
 610	ret = 0;
 611	while ((len = iov_iter_count(to)) > 0) {
 612		struct ceph_osd_request *req;
 613		struct page **pages;
 614		int num_pages;
 615		size_t page_off;
 616		u64 i_size;
 617		bool more;
 618
 619		req = ceph_osdc_new_request(osdc, &ci->i_layout,
 620					ci->i_vino, off, &len, 0, 1,
 621					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 622					NULL, ci->i_truncate_seq,
 623					ci->i_truncate_size, false);
 624		if (IS_ERR(req)) {
 625			ret = PTR_ERR(req);
 626			break;
 627		}
 628
 629		more = len < iov_iter_count(to);
 630
 631		if (unlikely(iov_iter_is_pipe(to))) {
 632			ret = iov_iter_get_pages_alloc(to, &pages, len,
 633						       &page_off);
 634			if (ret <= 0) {
 635				ceph_osdc_put_request(req);
 636				ret = -ENOMEM;
 637				break;
 638			}
 639			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 640			if (ret < len) {
 641				len = ret;
 642				osd_req_op_extent_update(req, 0, len);
 643				more = false;
 644			}
 645		} else {
 646			num_pages = calc_pages_for(off, len);
 647			page_off = off & ~PAGE_MASK;
 648			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 649			if (IS_ERR(pages)) {
 650				ceph_osdc_put_request(req);
 651				ret = PTR_ERR(pages);
 652				break;
 653			}
 654		}
 655
 656		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
 657						 false, false);
 658		ret = ceph_osdc_start_request(osdc, req, false);
 659		if (!ret)
 660			ret = ceph_osdc_wait_request(osdc, req);
 661		ceph_osdc_put_request(req);
 662
 663		i_size = i_size_read(inode);
 664		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
 665		     off, len, ret, i_size, (more ? " MORE" : ""));
 666
 667		if (ret == -ENOENT)
 668			ret = 0;
 669		if (ret >= 0 && ret < len && (off + ret < i_size)) {
 670			int zlen = min(len - ret, i_size - off - ret);
 671			int zoff = page_off + ret;
 672			dout("sync_read zero gap %llu~%llu\n",
 673			     off + ret, off + ret + zlen);
 674			ceph_zero_page_vector_range(zoff, zlen, pages);
 675			ret += zlen;
 676		}
 677
 678		if (unlikely(iov_iter_is_pipe(to))) {
 679			if (ret > 0) {
 680				iov_iter_advance(to, ret);
 681				off += ret;
 682			} else {
 683				iov_iter_advance(to, 0);
 684			}
 685			ceph_put_page_vector(pages, num_pages, false);
 686		} else {
 687			int idx = 0;
 688			size_t left = ret > 0 ? ret : 0;
 689			while (left > 0) {
 690				size_t len, copied;
 691				page_off = off & ~PAGE_MASK;
 692				len = min_t(size_t, left, PAGE_SIZE - page_off);
 693				copied = copy_page_to_iter(pages[idx++],
 694							   page_off, len, to);
 695				off += copied;
 696				left -= copied;
 697				if (copied < len) {
 698					ret = -EFAULT;
 699					break;
 700				}
 701			}
 702			ceph_release_page_vector(pages, num_pages);
 703		}
 704
 705		if (ret < 0) {
 706			if (ret == -EBLACKLISTED)
 707				fsc->blacklisted = true;
 708			break;
 709		}
 710
 711		if (off >= i_size || !more)
 712			break;
 713	}
 714
 715	if (off > iocb->ki_pos) {
 716		if (ret >= 0 &&
 717		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
 718			*retry_op = CHECK_EOF;
 719		ret = off - iocb->ki_pos;
 720		iocb->ki_pos = off;
 721	}
 722
 723	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
 724	return ret;
 725}
 726
 727struct ceph_aio_request {
 728	struct kiocb *iocb;
 729	size_t total_len;
 730	bool write;
 731	bool should_dirty;
 732	int error;
 733	struct list_head osd_reqs;
 734	unsigned num_reqs;
 735	atomic_t pending_reqs;
 736	struct timespec64 mtime;
 737	struct ceph_cap_flush *prealloc_cf;
 738};
 739
 740struct ceph_aio_work {
 741	struct work_struct work;
 742	struct ceph_osd_request *req;
 743};
 744
 745static void ceph_aio_retry_work(struct work_struct *work);
 746
 747static void ceph_aio_complete(struct inode *inode,
 748			      struct ceph_aio_request *aio_req)
 749{
 750	struct ceph_inode_info *ci = ceph_inode(inode);
 751	int ret;
 752
 753	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 754		return;
 755
 756	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
 757		inode_dio_end(inode);
 758
 759	ret = aio_req->error;
 760	if (!ret)
 761		ret = aio_req->total_len;
 762
 763	dout("ceph_aio_complete %p rc %d\n", inode, ret);
 764
 765	if (ret >= 0 && aio_req->write) {
 766		int dirty;
 767
 768		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
 769		if (endoff > i_size_read(inode)) {
 770			if (ceph_inode_set_size(inode, endoff))
 771				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 772		}
 773
 774		spin_lock(&ci->i_ceph_lock);
 775		ci->i_inline_version = CEPH_INLINE_NONE;
 776		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
 777					       &aio_req->prealloc_cf);
 778		spin_unlock(&ci->i_ceph_lock);
 779		if (dirty)
 780			__mark_inode_dirty(inode, dirty);
 781
 782	}
 783
 784	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 785						CEPH_CAP_FILE_RD));
 786
 787	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
 788
 789	ceph_free_cap_flush(aio_req->prealloc_cf);
 790	kfree(aio_req);
 791}
 792
 793static void ceph_aio_complete_req(struct ceph_osd_request *req)
 794{
 795	int rc = req->r_result;
 796	struct inode *inode = req->r_inode;
 797	struct ceph_aio_request *aio_req = req->r_priv;
 798	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 799
 800	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
 801	BUG_ON(!osd_data->num_bvecs);
 802
 803	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
 804	     inode, rc, osd_data->bvec_pos.iter.bi_size);
 805
 806	if (rc == -EOLDSNAPC) {
 807		struct ceph_aio_work *aio_work;
 808		BUG_ON(!aio_req->write);
 809
 810		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 811		if (aio_work) {
 812			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 813			aio_work->req = req;
 814			queue_work(ceph_inode_to_client(inode)->inode_wq,
 815				   &aio_work->work);
 816			return;
 817		}
 818		rc = -ENOMEM;
 819	} else if (!aio_req->write) {
 820		if (rc == -ENOENT)
 821			rc = 0;
 822		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
 823			struct iov_iter i;
 824			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
 825
 826			/*
 827			 * If read is satisfied by single OSD request,
 828			 * it can pass EOF. Otherwise read is within
 829			 * i_size.
 830			 */
 831			if (aio_req->num_reqs == 1) {
 832				loff_t i_size = i_size_read(inode);
 833				loff_t endoff = aio_req->iocb->ki_pos + rc;
 834				if (endoff < i_size)
 835					zlen = min_t(size_t, zlen,
 836						     i_size - endoff);
 837				aio_req->total_len = rc + zlen;
 838			}
 839
 840			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
 841				      osd_data->num_bvecs,
 842				      osd_data->bvec_pos.iter.bi_size);
 843			iov_iter_advance(&i, rc);
 844			iov_iter_zero(zlen, &i);
 845		}
 846	}
 847
 848	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
 849		  aio_req->should_dirty);
 850	ceph_osdc_put_request(req);
 851
 852	if (rc < 0)
 853		cmpxchg(&aio_req->error, 0, rc);
 854
 855	ceph_aio_complete(inode, aio_req);
 856	return;
 857}
 858
 859static void ceph_aio_retry_work(struct work_struct *work)
 860{
 861	struct ceph_aio_work *aio_work =
 862		container_of(work, struct ceph_aio_work, work);
 863	struct ceph_osd_request *orig_req = aio_work->req;
 864	struct ceph_aio_request *aio_req = orig_req->r_priv;
 865	struct inode *inode = orig_req->r_inode;
 866	struct ceph_inode_info *ci = ceph_inode(inode);
 867	struct ceph_snap_context *snapc;
 868	struct ceph_osd_request *req;
 869	int ret;
 870
 871	spin_lock(&ci->i_ceph_lock);
 872	if (__ceph_have_pending_cap_snap(ci)) {
 873		struct ceph_cap_snap *capsnap =
 874			list_last_entry(&ci->i_cap_snaps,
 875					struct ceph_cap_snap,
 876					ci_item);
 877		snapc = ceph_get_snap_context(capsnap->context);
 878	} else {
 879		BUG_ON(!ci->i_head_snapc);
 880		snapc = ceph_get_snap_context(ci->i_head_snapc);
 881	}
 882	spin_unlock(&ci->i_ceph_lock);
 883
 884	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
 885			false, GFP_NOFS);
 886	if (!req) {
 887		ret = -ENOMEM;
 888		req = orig_req;
 889		goto out;
 890	}
 891
 892	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 893	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
 894	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
 895
 896	req->r_ops[0] = orig_req->r_ops[0];
 897
 898	req->r_mtime = aio_req->mtime;
 899	req->r_data_offset = req->r_ops[0].extent.offset;
 900
 901	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 902	if (ret) {
 903		ceph_osdc_put_request(req);
 904		req = orig_req;
 905		goto out;
 906	}
 907
 908	ceph_osdc_put_request(orig_req);
 909
 910	req->r_callback = ceph_aio_complete_req;
 911	req->r_inode = inode;
 912	req->r_priv = aio_req;
 913
 914	ret = ceph_osdc_start_request(req->r_osdc, req, false);
 915out:
 916	if (ret < 0) {
 917		req->r_result = ret;
 918		ceph_aio_complete_req(req);
 919	}
 920
 921	ceph_put_snap_context(snapc);
 922	kfree(aio_work);
 923}
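/*
 * Illustrative sketch (not part of the original file): the
 * defer-to-workqueue pattern ceph_aio_complete_req() uses to hand
 * -EOLDSNAPC retries to ceph_aio_retry_work().  A completion callback
 * that may run in a context where it cannot block packages its state
 * and punts the blocking resend to process context.  Names here are
 * hypothetical.
 */
struct deferred_retry {
	struct work_struct work;
	void *state;
};

static void deferred_retry_fn(struct work_struct *work)
{
	struct deferred_retry *dr =
		container_of(work, struct deferred_retry, work);

	/* process context: safe to sleep, allocate, resend requests */
	kfree(dr);
}

static int defer_retry(void *state)
{
	struct deferred_retry *dr = kmalloc(sizeof(*dr), GFP_NOFS);

	if (!dr)
		return -ENOMEM;		/* caller must fail the request */
	INIT_WORK(&dr->work, deferred_retry_fn);
	dr->state = state;
	schedule_work(&dr->work);
	return 0;
}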
 924
 925static ssize_t
 926ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 927		       struct ceph_snap_context *snapc,
 928		       struct ceph_cap_flush **pcf)
 929{
 930	struct file *file = iocb->ki_filp;
 931	struct inode *inode = file_inode(file);
 932	struct ceph_inode_info *ci = ceph_inode(inode);
 933	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 934	struct ceph_vino vino;
 935	struct ceph_osd_request *req;
 936	struct bio_vec *bvecs;
 937	struct ceph_aio_request *aio_req = NULL;
 938	int num_pages = 0;
 939	int flags;
 940	int ret = 0;
 941	struct timespec64 mtime = current_time(inode);
 942	size_t count = iov_iter_count(iter);
 943	loff_t pos = iocb->ki_pos;
 944	bool write = iov_iter_rw(iter) == WRITE;
 945	bool should_dirty = !write && iter_is_iovec(iter);
 946
 947	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 948		return -EROFS;
 949
 950	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
 951	     (write ? "write" : "read"), file, pos, (unsigned)count,
 952	     snapc, snapc ? snapc->seq : 0);
 953
 954	if (write) {
 955		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 956					pos >> PAGE_SHIFT,
 957					(pos + count - 1) >> PAGE_SHIFT);
 958		if (ret2 < 0)
 959			dout("invalidate_inode_pages2_range returned %d\n", ret2);
 960
 961		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 962	} else {
 963		flags = CEPH_OSD_FLAG_READ;
 964	}
 965
 966	while (iov_iter_count(iter) > 0) {
 967		u64 size = iov_iter_count(iter);
 968		ssize_t len;
 969
 970		if (write)
 971			size = min_t(u64, size, fsc->mount_options->wsize);
 972		else
 973			size = min_t(u64, size, fsc->mount_options->rsize);
 974
 975		vino = ceph_vino(inode);
 976		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 977					    vino, pos, &size, 0,
 978					    1,
 979					    write ? CEPH_OSD_OP_WRITE :
 980						    CEPH_OSD_OP_READ,
 981					    flags, snapc,
 982					    ci->i_truncate_seq,
 983					    ci->i_truncate_size,
 984					    false);
 985		if (IS_ERR(req)) {
 986			ret = PTR_ERR(req);
 987			break;
 988		}
 989
 990		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 991		if (len < 0) {
 992			ceph_osdc_put_request(req);
 993			ret = len;
 994			break;
 995		}
 996		if (len != size)
 997			osd_req_op_extent_update(req, 0, len);
 998
 999		/*
1000		 * To simplify error handling, allow AIO only when the IO is
1001		 * within i_size or can be satisfied by a single OSD request.
1002		 */
1003		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1004		    (len == count || pos + count <= i_size_read(inode))) {
1005			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1006			if (aio_req) {
1007				aio_req->iocb = iocb;
1008				aio_req->write = write;
1009				aio_req->should_dirty = should_dirty;
1010				INIT_LIST_HEAD(&aio_req->osd_reqs);
1011				if (write) {
1012					aio_req->mtime = mtime;
1013					swap(aio_req->prealloc_cf, *pcf);
1014				}
1015			}
1016			/* ignore error */
1017		}
1018
1019		if (write) {
1020			/*
1021			 * throw out any page cache pages in this range. this
1022			 * may block.
1023			 */
1024			truncate_inode_pages_range(inode->i_mapping, pos,
1025						   PAGE_ALIGN(pos + len) - 1);
1026
1027			req->r_mtime = mtime;
1028		}
1029
1030		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1031
1032		if (aio_req) {
1033			aio_req->total_len += len;
1034			aio_req->num_reqs++;
1035			atomic_inc(&aio_req->pending_reqs);
1036
1037			req->r_callback = ceph_aio_complete_req;
1038			req->r_inode = inode;
1039			req->r_priv = aio_req;
1040			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1041
1042			pos += len;
1043			continue;
1044		}
1045
1046		ret = ceph_osdc_start_request(req->r_osdc, req, false);
1047		if (!ret)
1048			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1049
1050		size = i_size_read(inode);
1051		if (!write) {
1052			if (ret == -ENOENT)
1053				ret = 0;
1054			if (ret >= 0 && ret < len && pos + ret < size) {
1055				struct iov_iter i;
1056				int zlen = min_t(size_t, len - ret,
1057						 size - pos - ret);
1058
1059				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1060				iov_iter_advance(&i, ret);
1061				iov_iter_zero(zlen, &i);
1062				ret += zlen;
1063			}
1064			if (ret >= 0)
1065				len = ret;
1066		}
1067
1068		put_bvecs(bvecs, num_pages, should_dirty);
1069		ceph_osdc_put_request(req);
1070		if (ret < 0)
1071			break;
1072
1073		pos += len;
1074		if (!write && pos >= size)
1075			break;
1076
1077		if (write && pos > size) {
1078			if (ceph_inode_set_size(inode, pos))
1079				ceph_check_caps(ceph_inode(inode),
1080						CHECK_CAPS_AUTHONLY,
1081						NULL);
1082		}
1083	}
1084
1085	if (aio_req) {
1086		LIST_HEAD(osd_reqs);
1087
1088		if (aio_req->num_reqs == 0) {
1089			kfree(aio_req);
1090			return ret;
1091		}
1092
1093		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1094					      CEPH_CAP_FILE_RD);
1095
1096		list_splice(&aio_req->osd_reqs, &osd_reqs);
1097		inode_dio_begin(inode);
1098		while (!list_empty(&osd_reqs)) {
1099			req = list_first_entry(&osd_reqs,
1100					       struct ceph_osd_request,
1101					       r_private_item);
1102			list_del_init(&req->r_private_item);
1103			if (ret >= 0)
1104				ret = ceph_osdc_start_request(req->r_osdc,
1105							      req, false);
1106			if (ret < 0) {
1107				req->r_result = ret;
1108				ceph_aio_complete_req(req);
1109			}
1110		}
1111		return -EIOCBQUEUED;
1112	}
1113
1114	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1115		ret = pos - iocb->ki_pos;
1116		iocb->ki_pos = pos;
1117	}
1118	return ret;
1119}
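/*
 * Illustrative sketch (not part of the original file): the fan-out /
 * fan-in accounting ceph_direct_read_write() uses for AIO.  One atomic
 * pending counter covers all sub-requests; whichever completion drops
 * it to zero reports the final result.  Names are hypothetical, and
 * ->ki_complete() takes (iocb, res, res2) in this kernel generation.
 */
struct split_aio {
	struct kiocb *iocb;
	atomic_t pending;
	int error;		/* first error wins, set via cmpxchg */
	size_t total_len;
};

static void split_aio_sub_done(struct split_aio *sa, int rc)
{
	if (rc < 0)
		cmpxchg(&sa->error, 0, rc);	/* keep the first error */
	if (atomic_dec_and_test(&sa->pending)) {
		struct kiocb *iocb = sa->iocb;
		long res = sa->error ?: (long)sa->total_len;

		kfree(sa);
		iocb->ki_complete(iocb, res, 0);
	}
}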
1120
1121/*
1122 * Synchronous write, straight from __user pointer or user pages.
1123 *
1124 * If write spans object boundary, just do multiple writes.  (For a
1125 * correct atomic write, we should e.g. take write locks on all
1126 * objects, rollback on failure, etc.)
1127 */
1128static ssize_t
1129ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1130		struct ceph_snap_context *snapc)
1131{
1132	struct file *file = iocb->ki_filp;
1133	struct inode *inode = file_inode(file);
1134	struct ceph_inode_info *ci = ceph_inode(inode);
1135	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1136	struct ceph_vino vino;
1137	struct ceph_osd_request *req;
1138	struct page **pages;
1139	u64 len;
1140	int num_pages;
1141	int written = 0;
1142	int flags;
1143	int ret;
1144	bool check_caps = false;
1145	struct timespec64 mtime = current_time(inode);
1146	size_t count = iov_iter_count(from);
1147
1148	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1149		return -EROFS;
1150
1151	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1152	     file, pos, (unsigned)count, snapc, snapc->seq);
1153
1154	ret = filemap_write_and_wait_range(inode->i_mapping,
1155					   pos, pos + count - 1);
1156	if (ret < 0)
1157		return ret;
1158
1159	ret = invalidate_inode_pages2_range(inode->i_mapping,
1160					    pos >> PAGE_SHIFT,
1161					    (pos + count - 1) >> PAGE_SHIFT);
1162	if (ret < 0)
1163		dout("invalidate_inode_pages2_range returned %d\n", ret);
1164
1165	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1166
1167	while ((len = iov_iter_count(from)) > 0) {
1168		size_t left;
1169		int n;
1170
1171		vino = ceph_vino(inode);
1172		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1173					    vino, pos, &len, 0, 1,
1174					    CEPH_OSD_OP_WRITE, flags, snapc,
1175					    ci->i_truncate_seq,
1176					    ci->i_truncate_size,
1177					    false);
1178		if (IS_ERR(req)) {
1179			ret = PTR_ERR(req);
1180			break;
1181		}
1182
1183		/*
1184		 * write from beginning of first page,
1185		 * regardless of io alignment
1186		 */
1187		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1188
1189		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1190		if (IS_ERR(pages)) {
1191			ret = PTR_ERR(pages);
1192			goto out;
1193		}
1194
1195		left = len;
1196		for (n = 0; n < num_pages; n++) {
1197			size_t plen = min_t(size_t, left, PAGE_SIZE);
1198			ret = copy_page_from_iter(pages[n], 0, plen, from);
1199			if (ret != plen) {
1200				ret = -EFAULT;
1201				break;
1202			}
1203			left -= ret;
1204		}
1205
1206		if (ret < 0) {
1207			ceph_release_page_vector(pages, num_pages);
1208			goto out;
1209		}
1210
1211		req->r_inode = inode;
1212
1213		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1214						false, true);
1215
1216		req->r_mtime = mtime;
1217		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1218		if (!ret)
1219			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1220
1221out:
1222		ceph_osdc_put_request(req);
1223		if (ret != 0) {
1224			ceph_set_error_write(ci);
1225			break;
1226		}
1227
1228		ceph_clear_error_write(ci);
1229		pos += len;
1230		written += len;
1231		if (pos > i_size_read(inode)) {
1232			check_caps = ceph_inode_set_size(inode, pos);
1233			if (check_caps)
1234				ceph_check_caps(ceph_inode(inode),
1235						CHECK_CAPS_AUTHONLY,
1236						NULL);
1237		}
1238
1239	}
1240
1241	if (ret != -EOLDSNAPC && written > 0) {
1242		ret = written;
1243		iocb->ki_pos = pos;
1244	}
1245	return ret;
1246}
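/*
 * Illustrative sketch (not part of the original file): filling a page
 * vector from an iov_iter one page at a time, as the copy loop in
 * ceph_sync_write() above does before attaching the pages to the OSD
 * request.  Assumes 'pages' holds enough pages for 'len' bytes.
 */
static int fill_pages_from_iter(struct page **pages, size_t len,
				struct iov_iter *from)
{
	size_t left = len;
	int n = 0;

	while (left) {
		size_t plen = min_t(size_t, left, PAGE_SIZE);

		/* a short copy means the user buffer faulted */
		if (copy_page_from_iter(pages[n], 0, plen, from) != plen)
			return -EFAULT;
		left -= plen;
		n++;
	}
	return 0;
}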
1247
1248/*
1249 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1250 * Atomically grab references, so that those bits are not released
1251 * back to the MDS mid-read.
1252 *
1253 * Hmm, the sync read case isn't actually async... should it be?
1254 */
1255static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1256{
1257	struct file *filp = iocb->ki_filp;
1258	struct ceph_file_info *fi = filp->private_data;
1259	size_t len = iov_iter_count(to);
1260	struct inode *inode = file_inode(filp);
1261	struct ceph_inode_info *ci = ceph_inode(inode);
1262	struct page *pinned_page = NULL;
1263	ssize_t ret;
1264	int want, got = 0;
1265	int retry_op = 0, read = 0;
1266
1267again:
1268	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1269	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1270
1271	if (iocb->ki_flags & IOCB_DIRECT)
1272		ceph_start_io_direct(inode);
1273	else
1274		ceph_start_io_read(inode);
1275
1276	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1277		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1278	else
1279		want = CEPH_CAP_FILE_CACHE;
1280	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
1281			    &got, &pinned_page);
1282	if (ret < 0) {
1283		if (iocb->ki_flags & IOCB_DIRECT)
1284			ceph_end_io_direct(inode);
1285		else
1286			ceph_end_io_read(inode);
1287		return ret;
1288	}
1289
1290	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1291	    (iocb->ki_flags & IOCB_DIRECT) ||
1292	    (fi->flags & CEPH_F_SYNC)) {
1293
1294		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1295		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1296		     ceph_cap_string(got));
1297
1298		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1299			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1300				ret = ceph_direct_read_write(iocb, to,
1301							     NULL, NULL);
1302				if (ret >= 0 && ret < len)
1303					retry_op = CHECK_EOF;
1304			} else {
1305				ret = ceph_sync_read(iocb, to, &retry_op);
1306			}
1307		} else {
1308			retry_op = READ_INLINE;
1309		}
1310	} else {
1311		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1312		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1313		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1314		     ceph_cap_string(got));
1315		ceph_add_rw_context(fi, &rw_ctx);
1316		ret = generic_file_read_iter(iocb, to);
1317		ceph_del_rw_context(fi, &rw_ctx);
1318	}
1319
1320	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1321	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1322	if (pinned_page) {
1323		put_page(pinned_page);
1324		pinned_page = NULL;
1325	}
1326	ceph_put_cap_refs(ci, got);
1327
1328	if (iocb->ki_flags & IOCB_DIRECT)
1329		ceph_end_io_direct(inode);
1330	else
1331		ceph_end_io_read(inode);
1332
1333	if (retry_op > HAVE_RETRIED && ret >= 0) {
1334		int statret;
1335		struct page *page = NULL;
1336		loff_t i_size;
1337		if (retry_op == READ_INLINE) {
1338			page = __page_cache_alloc(GFP_KERNEL);
1339			if (!page)
1340				return -ENOMEM;
1341		}
1342
1343		statret = __ceph_do_getattr(inode, page,
1344					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1345		if (statret < 0) {
1346			if (page)
1347				__free_page(page);
1348			if (statret == -ENODATA) {
1349				BUG_ON(retry_op != READ_INLINE);
1350				goto again;
1351			}
1352			return statret;
1353		}
1354
1355		i_size = i_size_read(inode);
1356		if (retry_op == READ_INLINE) {
1357			BUG_ON(ret > 0 || read > 0);
1358			if (iocb->ki_pos < i_size &&
1359			    iocb->ki_pos < PAGE_SIZE) {
1360				loff_t end = min_t(loff_t, i_size,
1361						   iocb->ki_pos + len);
1362				end = min_t(loff_t, end, PAGE_SIZE);
1363				if (statret < end)
1364					zero_user_segment(page, statret, end);
1365				ret = copy_page_to_iter(page,
1366						iocb->ki_pos & ~PAGE_MASK,
1367						end - iocb->ki_pos, to);
1368				iocb->ki_pos += ret;
1369				read += ret;
1370			}
1371			if (iocb->ki_pos < i_size && read < len) {
1372				size_t zlen = min_t(size_t, len - read,
1373						    i_size - iocb->ki_pos);
1374				ret = iov_iter_zero(zlen, to);
1375				iocb->ki_pos += ret;
1376				read += ret;
1377			}
1378			__free_pages(page, 0);
1379			return read;
1380		}
1381
1382		/* hit EOF or hole? */
1383		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1384		    ret < len) {
1385			dout("sync_read hit hole, ppos %lld < size %lld"
1386			     ", reading more\n", iocb->ki_pos, i_size);
1387
1388			read += ret;
1389			len -= ret;
1390			retry_op = HAVE_RETRIED;
1391			goto again;
1392		}
1393	}
1394
1395	if (ret >= 0)
1396		ret += read;
1397
1398	return ret;
1399}
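/*
 * Illustrative sketch (not part of the original file): the READ_INLINE
 * branch above in isolation; serve a read from the single page of
 * inline data, then zero-fill whatever still lies below i_size.  This
 * helper is hypothetical, not part of the driver.
 */
static ssize_t read_inline_page(struct page *page, struct kiocb *iocb,
				struct iov_iter *to, loff_t i_size)
{
	size_t copied = 0;

	if (iocb->ki_pos < i_size && iocb->ki_pos < PAGE_SIZE) {
		loff_t end = min_t(loff_t, i_size,
				   iocb->ki_pos + iov_iter_count(to));

		end = min_t(loff_t, end, PAGE_SIZE);
		copied = copy_page_to_iter(page, iocb->ki_pos & ~PAGE_MASK,
					   end - iocb->ki_pos, to);
		iocb->ki_pos += copied;
	}
	if (iocb->ki_pos < i_size && iov_iter_count(to)) {
		size_t zlen = min_t(size_t, iov_iter_count(to),
				    i_size - iocb->ki_pos);

		zlen = iov_iter_zero(zlen, to);
		iocb->ki_pos += zlen;
		copied += zlen;
	}
	return copied;
}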
1400
1401/*
1402 * Take cap references to avoid releasing caps to MDS mid-write.
1403 *
1404 * If we are synchronous, and write with an old snap context, the OSD
1405 * may return EOLDSNAPC.  In that case, retry the write... _after_
1406 * dropping our cap refs and allowing the pending snap to logically
1407 * complete _before_ this write occurs.
1408 *
1409 * If we are near ENOSPC, write synchronously.
1410 */
1411static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1412{
1413	struct file *file = iocb->ki_filp;
1414	struct ceph_file_info *fi = file->private_data;
1415	struct inode *inode = file_inode(file);
1416	struct ceph_inode_info *ci = ceph_inode(inode);
1417	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1418	struct ceph_cap_flush *prealloc_cf;
1419	ssize_t count, written = 0;
1420	int err, want, got;
1421	loff_t pos;
1422	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1423
1424	if (ceph_snap(inode) != CEPH_NOSNAP)
1425		return -EROFS;
1426
1427	prealloc_cf = ceph_alloc_cap_flush();
1428	if (!prealloc_cf)
1429		return -ENOMEM;
1430
1431retry_snap:
1432	if (iocb->ki_flags & IOCB_DIRECT)
1433		ceph_start_io_direct(inode);
1434	else
1435		ceph_start_io_write(inode);
1436
1437	/* We can write back this queue in page reclaim */
1438	current->backing_dev_info = inode_to_bdi(inode);
1439
1440	if (iocb->ki_flags & IOCB_APPEND) {
1441		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1442		if (err < 0)
1443			goto out;
1444	}
1445
1446	err = generic_write_checks(iocb, from);
1447	if (err <= 0)
1448		goto out;
1449
1450	pos = iocb->ki_pos;
1451	if (unlikely(pos >= limit)) {
1452		err = -EFBIG;
1453		goto out;
1454	} else {
1455		iov_iter_truncate(from, limit - pos);
1456	}
1457
1458	count = iov_iter_count(from);
1459	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1460		err = -EDQUOT;
1461		goto out;
1462	}
1463
1464	err = file_remove_privs(file);
1465	if (err)
1466		goto out;
1467
1468	err = file_update_time(file);
1469	if (err)
1470		goto out;
1471
1472	inode_inc_iversion_raw(inode);
1473
1474	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1475		err = ceph_uninline_data(file, NULL);
1476		if (err < 0)
1477			goto out;
1478	}
1479
1480	/* FIXME: not complete since it doesn't account for being at quota */
1481	if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
1482		err = -ENOSPC;
1483		goto out;
1484	}
1485
1486	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1487	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1488	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1489		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1490	else
1491		want = CEPH_CAP_FILE_BUFFER;
1492	got = 0;
1493	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
1494			    &got, NULL);
1495	if (err < 0)
1496		goto out;
1497
1498	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1499	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1500
1501	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1502	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1503	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1504		struct ceph_snap_context *snapc;
1505		struct iov_iter data;
1506
1507		spin_lock(&ci->i_ceph_lock);
1508		if (__ceph_have_pending_cap_snap(ci)) {
1509			struct ceph_cap_snap *capsnap =
1510					list_last_entry(&ci->i_cap_snaps,
1511							struct ceph_cap_snap,
1512							ci_item);
1513			snapc = ceph_get_snap_context(capsnap->context);
1514		} else {
1515			BUG_ON(!ci->i_head_snapc);
1516			snapc = ceph_get_snap_context(ci->i_head_snapc);
1517		}
1518		spin_unlock(&ci->i_ceph_lock);
1519
1520		/* we might need to revert back to that point */
1521		data = *from;
1522		if (iocb->ki_flags & IOCB_DIRECT) {
1523			written = ceph_direct_read_write(iocb, &data, snapc,
1524							 &prealloc_cf);
1525			ceph_end_io_direct(inode);
1526		} else {
1527			written = ceph_sync_write(iocb, &data, pos, snapc);
1528			ceph_end_io_write(inode);
1529		}
1530		if (written > 0)
1531			iov_iter_advance(from, written);
1532		ceph_put_snap_context(snapc);
1533	} else {
1534		/*
1535		 * No need to acquire the i_truncate_mutex: the MDS
1536		 * revokes Fwb caps before sending a truncate message
1537		 * to us, and we can't get the Fwb cap while there is
1538		 * a pending vmtruncate.  So write and vmtruncate
1539		 * cannot run at the same time.
1540		 */
1541		written = generic_perform_write(file, from, pos);
1542		if (likely(written >= 0))
1543			iocb->ki_pos = pos + written;
1544		ceph_end_io_write(inode);
1545	}
1546
1547	if (written >= 0) {
1548		int dirty;
1549
1550		spin_lock(&ci->i_ceph_lock);
1551		ci->i_inline_version = CEPH_INLINE_NONE;
1552		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1553					       &prealloc_cf);
1554		spin_unlock(&ci->i_ceph_lock);
1555		if (dirty)
1556			__mark_inode_dirty(inode, dirty);
1557		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1558			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1559	}
1560
1561	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1562	     inode, ceph_vinop(inode), pos, (unsigned)count,
1563	     ceph_cap_string(got));
1564	ceph_put_cap_refs(ci, got);
1565
1566	if (written == -EOLDSNAPC) {
1567		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1568		     inode, ceph_vinop(inode), pos, (unsigned)count);
1569		goto retry_snap;
1570	}
1571
1572	if (written >= 0) {
1573		if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
1574			iocb->ki_flags |= IOCB_DSYNC;
1575		written = generic_write_sync(iocb, written);
1576	}
1577
1578	goto out_unlocked;
1579out:
1580	if (iocb->ki_flags & IOCB_DIRECT)
1581		ceph_end_io_direct(inode);
1582	else
1583		ceph_end_io_write(inode);
1584out_unlocked:
1585	ceph_free_cap_flush(prealloc_cf);
1586	current->backing_dev_info = NULL;
1587	return written ? written : err;
1588}
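/*
 * Illustrative sketch (not part of the original file): the overall
 * -EOLDSNAPC retry shape of ceph_write_iter() above.  The snap context
 * is sampled under i_ceph_lock before the IO is issued; if the OSDs
 * reject it as stale, cap refs are dropped and the whole write is
 * simply restarted with a fresh context.  'issue_write' is a
 * hypothetical stand-in for the sync/direct write paths.
 */
static ssize_t write_with_snapc_retry(struct ceph_inode_info *ci,
		ssize_t (*issue_write)(struct ceph_snap_context *snapc))
{
	struct ceph_snap_context *snapc;
	ssize_t ret;

	do {
		spin_lock(&ci->i_ceph_lock);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		spin_unlock(&ci->i_ceph_lock);

		ret = issue_write(snapc);
		ceph_put_snap_context(snapc);
	} while (ret == -EOLDSNAPC);

	return ret;
}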
1589
1590/*
1591 * llseek.  be sure to verify file size on SEEK_END.
1592 */
1593static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1594{
1595	struct inode *inode = file->f_mapping->host;
1596	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1597	loff_t i_size;
1598	loff_t ret;
1599
1600	inode_lock(inode);
1601
1602	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1603		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1604		if (ret < 0)
1605			goto out;
1606	}
1607
1608	i_size = i_size_read(inode);
1609	switch (whence) {
1610	case SEEK_END:
1611		offset += i_size;
1612		break;
1613	case SEEK_CUR:
1614		/*
1615		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1616		 * position-querying operation.  Avoid rewriting the "same"
1617		 * f_pos value back to the file because a concurrent read(),
1618		 * write() or lseek() might have altered it
1619		 */
1620		if (offset == 0) {
1621			ret = file->f_pos;
1622			goto out;
1623		}
1624		offset += file->f_pos;
1625		break;
1626	case SEEK_DATA:
1627		if (offset < 0 || offset >= i_size) {
1628			ret = -ENXIO;
1629			goto out;
1630		}
1631		break;
1632	case SEEK_HOLE:
1633		if (offset < 0 || offset >= i_size) {
1634			ret = -ENXIO;
1635			goto out;
1636		}
1637		offset = i_size;
1638		break;
1639	}
1640
1641	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1642
1643out:
1644	inode_unlock(inode);
1645	return ret;
1646}
1647
1648static inline void ceph_zero_partial_page(
1649	struct inode *inode, loff_t offset, unsigned size)
1650{
1651	struct page *page;
1652	pgoff_t index = offset >> PAGE_SHIFT;
1653
1654	page = find_lock_page(inode->i_mapping, index);
1655	if (page) {
1656		wait_on_page_writeback(page);
1657		zero_user(page, offset & (PAGE_SIZE - 1), size);
1658		unlock_page(page);
1659		put_page(page);
1660	}
1661}
1662
1663static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1664				      loff_t length)
1665{
1666	loff_t nearly = round_up(offset, PAGE_SIZE);
1667	if (offset < nearly) {
1668		loff_t size = nearly - offset;
1669		if (length < size)
1670			size = length;
1671		ceph_zero_partial_page(inode, offset, size);
1672		offset += size;
1673		length -= size;
1674	}
1675	if (length >= PAGE_SIZE) {
1676		loff_t size = round_down(length, PAGE_SIZE);
1677		truncate_pagecache_range(inode, offset, offset + size - 1);
1678		offset += size;
1679		length -= size;
1680	}
1681	if (length)
1682		ceph_zero_partial_page(inode, offset, length);
1683}
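/*
 * Illustrative sketch (not part of the original file): the
 * head/middle/tail decomposition ceph_zero_pagecache_range() performs,
 * written out for an arbitrary power-of-two block size.  Partial
 * blocks at either end are zeroed in place; whole blocks in the
 * middle can be dropped outright.  The two callbacks are hypothetical.
 */
static void for_each_subrange(loff_t offset, loff_t length,
			      unsigned int blksz,
			      void (*zero_partial)(loff_t off, loff_t len),
			      void (*drop_whole)(loff_t off, loff_t len))
{
	loff_t head_end = round_up(offset, blksz);

	if (offset < head_end) {		/* partial head block */
		loff_t size = min_t(loff_t, length, head_end - offset);

		zero_partial(offset, size);
		offset += size;
		length -= size;
	}
	if (length >= blksz) {			/* whole middle blocks */
		loff_t size = round_down(length, blksz);

		drop_whole(offset, size);
		offset += size;
		length -= size;
	}
	if (length)				/* partial tail block */
		zero_partial(offset, length);
}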
1684
1685static int ceph_zero_partial_object(struct inode *inode,
1686				    loff_t offset, loff_t *length)
1687{
1688	struct ceph_inode_info *ci = ceph_inode(inode);
1689	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1690	struct ceph_osd_request *req;
1691	int ret = 0;
1692	loff_t zero = 0;
1693	int op;
1694
1695	if (!length) {
1696		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1697		length = &zero;
1698	} else {
1699		op = CEPH_OSD_OP_ZERO;
1700	}
1701
1702	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1703					ceph_vino(inode),
1704					offset, length,
1705					0, 1, op,
1706					CEPH_OSD_FLAG_WRITE,
1707					NULL, 0, 0, false);
1708	if (IS_ERR(req)) {
1709		ret = PTR_ERR(req);
1710		goto out;
1711	}
1712
1713	req->r_mtime = inode->i_mtime;
1714	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1715	if (!ret) {
1716		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1717		if (ret == -ENOENT)
1718			ret = 0;
1719	}
1720	ceph_osdc_put_request(req);
1721
1722out:
1723	return ret;
1724}
1725
1726static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1727{
1728	int ret = 0;
1729	struct ceph_inode_info *ci = ceph_inode(inode);
1730	s32 stripe_unit = ci->i_layout.stripe_unit;
1731	s32 stripe_count = ci->i_layout.stripe_count;
1732	s32 object_size = ci->i_layout.object_size;
1733	u64 object_set_size = (u64)object_size * stripe_count; /* avoid s32 overflow */
1734	u64 nearly, t;
1735
1736	/* round offset up to next period boundary */
1737	nearly = offset + object_set_size - 1;
1738	t = nearly;
1739	nearly -= do_div(t, object_set_size);
1740
1741	while (length && offset < nearly) {
1742		loff_t size = length;
1743		ret = ceph_zero_partial_object(inode, offset, &size);
1744		if (ret < 0)
1745			return ret;
1746		offset += size;
1747		length -= size;
1748	}
1749	while (length >= object_set_size) {
1750		int i;
1751		loff_t pos = offset;
1752		for (i = 0; i < stripe_count; ++i) {
1753			ret = ceph_zero_partial_object(inode, pos, NULL);
1754			if (ret < 0)
1755				return ret;
1756			pos += stripe_unit;
1757		}
1758		offset += object_set_size;
1759		length -= object_set_size;
1760	}
1761	while (length) {
1762		loff_t size = length;
1763		ret = ceph_zero_partial_object(inode, offset, &size);
1764		if (ret < 0)
1765			return ret;
1766		offset += size;
1767		length -= size;
1768	}
1769	return ret;
1770}
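/*
 * Illustrative sketch (not part of the original file): the do_div()
 * idiom used at the top of ceph_zero_objects() to round an offset up
 * to the next stripe-period boundary.  do_div() divides its first
 * argument in place and returns the remainder, hence the temporary.
 */
static u64 next_period_boundary(u64 offset, u32 period)
{
	u64 t = offset + period - 1;

	do_div(t, period);	/* t is now the quotient */
	return t * period;	/* first boundary at or after offset */
}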
1771
1772static long ceph_fallocate(struct file *file, int mode,
1773				loff_t offset, loff_t length)
1774{
1775	struct ceph_file_info *fi = file->private_data;
1776	struct inode *inode = file_inode(file);
1777	struct ceph_inode_info *ci = ceph_inode(inode);
1778	struct ceph_cap_flush *prealloc_cf;
1779	int want, got = 0;
1780	int dirty;
1781	int ret = 0;
1782	loff_t endoff = 0;
1783	loff_t size;
1784
1785	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1786		return -EOPNOTSUPP;
1787
1788	if (!S_ISREG(inode->i_mode))
1789		return -EOPNOTSUPP;
1790
1791	prealloc_cf = ceph_alloc_cap_flush();
1792	if (!prealloc_cf)
1793		return -ENOMEM;
1794
1795	inode_lock(inode);
1796
1797	if (ceph_snap(inode) != CEPH_NOSNAP) {
1798		ret = -EROFS;
1799		goto unlock;
1800	}
1801
1802	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1803		ret = ceph_uninline_data(file, NULL);
1804		if (ret < 0)
1805			goto unlock;
1806	}
1807
1808	size = i_size_read(inode);
1809
1810	/* Are we punching a hole beyond EOF? */
1811	if (offset >= size)
1812		goto unlock;
1813	if ((offset + length) > size)
1814		length = size - offset;
1815
1816	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1817		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1818	else
1819		want = CEPH_CAP_FILE_BUFFER;
1820
1821	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1822	if (ret < 0)
1823		goto unlock;
1824
1825	ceph_zero_pagecache_range(inode, offset, length);
1826	ret = ceph_zero_objects(inode, offset, length);
1827
1828	if (!ret) {
1829		spin_lock(&ci->i_ceph_lock);
1830		ci->i_inline_version = CEPH_INLINE_NONE;
1831		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1832					       &prealloc_cf);
1833		spin_unlock(&ci->i_ceph_lock);
1834		if (dirty)
1835			__mark_inode_dirty(inode, dirty);
1836	}
1837
1838	ceph_put_cap_refs(ci, got);
1839unlock:
1840	inode_unlock(inode);
1841	ceph_free_cap_flush(prealloc_cf);
1842	return ret;
1843}
1844
1845/*
1846 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
1847 * src_ci.  Two attempts are made to obtain both caps, and an error is
1848 * returned if this fails; zero is returned on success.
1849 */
1850static int get_rd_wr_caps(struct file *src_filp, int *src_got,
1851			  struct file *dst_filp,
1852			  loff_t dst_endoff, int *dst_got)
1853{
1854	int ret = 0;
1855	bool retrying = false;
1856
1857retry_caps:
1858	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
1859			    dst_endoff, dst_got, NULL);
1860	if (ret < 0)
1861		return ret;
1862
1863	/*
1864	 * Since we're already holding the FILE_WR capability for the dst file,
1865	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
1866	 * retry dance instead to try to get both capabilities.
1867	 */
1868	ret = ceph_try_get_caps(file_inode(src_filp),
1869				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
1870				false, src_got);
1871	if (ret <= 0) {
1872		/* Start by dropping dst_ci caps and getting src_ci caps */
1873		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
1874		if (retrying) {
1875			if (!ret)
1876				/* ceph_try_get_caps masks EAGAIN */
1877				ret = -EAGAIN;
1878			return ret;
1879		}
1880		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
1881				    CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
1882		if (ret < 0)
1883			return ret;
1884		/* ... drop src_ci caps too, and retry */
1885		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
1886		retrying = true;
1887		goto retry_caps;
1888	}
1889	return ret;
1890}
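/*
 * Illustrative sketch (not part of the original file): the generic
 * try-then-back-off shape of get_rd_wr_caps() above, shown with two
 * mutexes.  Blocking on B while holding A invites an ABBA deadlock,
 * so B is only try-locked; on failure everything is dropped, B is
 * waited for on its own, and the pair is attempted once more.
 */
static int lock_pair(struct mutex *a, struct mutex *b)
{
	bool retried = false;

again:
	mutex_lock(a);
	if (!mutex_trylock(b)) {
		mutex_unlock(a);
		if (retried)
			return -EAGAIN;	/* let the caller decide */
		mutex_lock(b);		/* wait for b without holding a... */
		mutex_unlock(b);	/* ...then retry the whole pair */
		retried = true;
		goto again;
	}
	return 0;			/* both held */
}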
1891
1892static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
1893			   struct ceph_inode_info *dst_ci, int dst_got)
1894{
1895	ceph_put_cap_refs(src_ci, src_got);
1896	ceph_put_cap_refs(dst_ci, dst_got);
1897}
1898
1899/*
1900 * This function does several size-related checks, returning an error if:
1901 *  - source file is smaller than off+len
1902 *  - destination file size is not OK (inode_newsize_ok())
1903 *  - the max-bytes quota is exceeded
1904 */
1905static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
1906			   loff_t src_off, loff_t dst_off, size_t len)
1907{
1908	loff_t size, endoff;
1909
1910	size = i_size_read(src_inode);
1911	/*
1912	 * Don't copy beyond source file EOF.  Instead of simply setting length
1913	 * to (size - src_off), just drop to VFS default implementation, as the
1914	 * local i_size may be stale due to other clients writing to the source
1915	 * inode.
1916	 */
1917	if (src_off + len > size) {
1918		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
1919		     src_off, len, size);
1920		return -EOPNOTSUPP;
1921	}
1922	size = i_size_read(dst_inode);
1923
1924	endoff = dst_off + len;
1925	if (inode_newsize_ok(dst_inode, endoff))
1926		return -EOPNOTSUPP;
1927
1928	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
1929		return -EDQUOT;
1930
1931	return 0;
1932}
1933
1934static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
1935				      struct file *dst_file, loff_t dst_off,
1936				      size_t len, unsigned int flags)
1937{
1938	struct inode *src_inode = file_inode(src_file);
1939	struct inode *dst_inode = file_inode(dst_file);
1940	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
1941	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
1942	struct ceph_cap_flush *prealloc_cf;
1943	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
1944	struct ceph_object_locator src_oloc, dst_oloc;
1945	struct ceph_object_id src_oid, dst_oid;
1946	loff_t endoff = 0, size;
1947	ssize_t ret = -EIO;
1948	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
1949	u32 src_objlen, dst_objlen, object_size;
1950	int src_got = 0, dst_got = 0, err, dirty;
1951	bool do_final_copy = false;
1952
1953	if (src_inode->i_sb != dst_inode->i_sb) {
1954		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
1955
1956		if (ceph_fsid_compare(&src_fsc->client->fsid,
1957				      &dst_fsc->client->fsid)) {
1958			dout("Copying files across clusters: src: %pU dst: %pU\n",
1959			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
1960			return -EXDEV;
1961		}
1962	}
1963	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
1964		return -EROFS;
1965
1966	/*
1967	 * Some of the checks below will return -EOPNOTSUPP, which will force a
1968	 * fallback to the default VFS copy_file_range implementation.  This is
1969 * desirable in several cases (for example, when 'len' is smaller than the
1970	 * size of the objects, or in cases where that would be more
1971	 * efficient).
1972	 */
1973
1974	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
1975		return -EOPNOTSUPP;
1976
1977	/*
1978	 * Striped file layouts require that we copy partial objects, but the
1979	 * OSD copy-from operation only supports full-object copies.  Limit
1980	 * this to non-striped file layouts for now.
1981	 */
1982	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1983	    (src_ci->i_layout.stripe_count != 1) ||
1984	    (dst_ci->i_layout.stripe_count != 1) ||
1985	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
1986		dout("Invalid src/dst files layout\n");
1987		return -EOPNOTSUPP;
1988	}
1989
1990	if (len < src_ci->i_layout.object_size)
1991		return -EOPNOTSUPP; /* no remote copy will be done */
1992
1993	prealloc_cf = ceph_alloc_cap_flush();
1994	if (!prealloc_cf)
1995		return -ENOMEM;
1996
1997	/* Start by sync'ing the source and destination files */
1998	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1999	if (ret < 0) {
2000		dout("failed to write src file (%zd)\n", ret);
2001		goto out;
2002	}
2003	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2004	if (ret < 0) {
2005		dout("failed to write dst file (%zd)\n", ret);
2006		goto out;
2007	}
2008
2009	/*
2010	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2011	 * clients may have dirty data in their caches.  And OSDs know nothing
2012	 * about caps, so they can't safely do the remote object copies.
2013	 */
2014	err = get_rd_wr_caps(src_file, &src_got,
2015			     dst_file, (dst_off + len), &dst_got);
2016	if (err < 0) {
2017		dout("get_rd_wr_caps returned %d\n", err);
2018		ret = -EOPNOTSUPP;
2019		goto out;
2020	}
2021
2022	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2023	if (ret < 0)
2024		goto out_caps;
2025
2026	size = i_size_read(dst_inode);
2027	endoff = dst_off + len;
2028
2029	/* Drop dst file cached pages */
2030	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2031					    dst_off >> PAGE_SHIFT,
2032					    endoff >> PAGE_SHIFT);
2033	if (ret < 0) {
2034		dout("Failed to invalidate inode pages (%zd)\n", ret);
2035		ret = 0; /* XXX */
2036	}
2037	src_oloc.pool = src_ci->i_layout.pool_id;
2038	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2039	dst_oloc.pool = dst_ci->i_layout.pool_id;
2040	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2041
2042	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2043				      src_ci->i_layout.object_size,
2044				      &src_objnum, &src_objoff, &src_objlen);
2045	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2046				      dst_ci->i_layout.object_size,
2047				      &dst_objnum, &dst_objoff, &dst_objlen);
2048	/* object-level offsets need to be the same */
2049	if (src_objoff != dst_objoff) {
2050		ret = -EOPNOTSUPP;
2051		goto out_caps;
2052	}
2053
2054	/*
2055	 * Do a manual copy if the object offset isn't object aligned.
2056	 * 'src_objlen' contains the bytes left until the end of the object,
2057	 * starting at src_off.
2058	 */
2059	if (src_objoff) {
2060		/*
2061		 * we need to temporarily drop all caps as we'll be calling
2062		 * {read,write}_iter, which will get caps again.
2063		 */
2064		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2065		ret = do_splice_direct(src_file, &src_off, dst_file,
2066				       &dst_off, src_objlen, flags);
2067		if (ret < 0) {
2068			dout("do_splice_direct returned %zd\n", ret);
2069			goto out;
2070		}
2071		len -= ret;
2072		err = get_rd_wr_caps(src_file, &src_got,
2073				     dst_file, (dst_off + len), &dst_got);
2074		if (err < 0)
2075			goto out;
2076		err = is_file_size_ok(src_inode, dst_inode,
2077				      src_off, dst_off, len);
2078		if (err < 0)
2079			goto out_caps;
2080	}
2081	object_size = src_ci->i_layout.object_size;
2082	while (len >= object_size) {
2083		ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2084					      object_size, &src_objnum,
2085					      &src_objoff, &src_objlen);
2086		ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2087					      object_size, &dst_objnum,
2088					      &dst_objoff, &dst_objlen);
2089		ceph_oid_init(&src_oid);
2090		ceph_oid_printf(&src_oid, "%llx.%08llx",
2091				src_ci->i_vino.ino, src_objnum);
2092		ceph_oid_init(&dst_oid);
2093		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2094				dst_ci->i_vino.ino, dst_objnum);
2095		/* Do an object remote copy */
2096		err = ceph_osdc_copy_from(
2097			&src_fsc->client->osdc,
2098			src_ci->i_vino.snap, 0,
2099			&src_oid, &src_oloc,
2100			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2101			CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2102			&dst_oid, &dst_oloc,
2103			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2104			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
2105		if (err) {
2106			dout("ceph_osdc_copy_from returned %d\n", err);
2107			if (!ret)
2108				ret = err;
2109			goto out_caps;
2110		}
2111		len -= object_size;
2112		src_off += object_size;
2113		dst_off += object_size;
2114		ret += object_size;
2115	}
2116
2117	if (len)
2118		/* We still need one final local copy */
2119		do_final_copy = true;
2120
2121	file_update_time(dst_file);
2122	inode_inc_iversion_raw(dst_inode);
2123
2124	if (endoff > size) {
2125		int caps_flags = 0;
2126
2127		/* Let the MDS know about dst file size change */
2128		if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
2129			caps_flags |= CHECK_CAPS_NODELAY;
2130		if (ceph_inode_set_size(dst_inode, endoff))
2131			caps_flags |= CHECK_CAPS_AUTHONLY;
2132		if (caps_flags)
2133			ceph_check_caps(dst_ci, caps_flags, NULL);
2134	}
2135	/* Mark Fw dirty */
2136	spin_lock(&dst_ci->i_ceph_lock);
2137	dst_ci->i_inline_version = CEPH_INLINE_NONE;
2138	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2139	spin_unlock(&dst_ci->i_ceph_lock);
2140	if (dirty)
2141		__mark_inode_dirty(dst_inode, dirty);
2142
2143out_caps:
2144	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2145
2146	if (do_final_copy) {
2147		err = do_splice_direct(src_file, &src_off, dst_file,
2148				       &dst_off, len, flags);
2149		if (err < 0) {
2150			dout("do_splice_direct returned %d\n", err);
2151			goto out;
2152		}
2153		len -= err;
2154		ret += err;
2155	}
2156
2157out:
2158	ceph_free_cap_flush(prealloc_cf);
2159
2160	return ret;
2161}
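/*
 * Illustrative sketch (not part of the original file): the file-offset
 * to RADOS-object mapping the copy loop above relies on, for the
 * simple stripe_count == 1 layouts it restricts itself to.  Object
 * names follow the "<ino hex>.<objnum %08llx>" convention seen in the
 * ceph_oid_printf() calls.  The helper name is hypothetical.
 */
static void simple_object_mapping(u64 off, u32 object_size,
				  u64 *objnum, u32 *objoff)
{
	/* div_u64_rem() avoids a raw 64-by-32 division on 32-bit arches */
	*objnum = div_u64_rem(off, object_size, objoff);
}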
2162
2163static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2164				    struct file *dst_file, loff_t dst_off,
2165				    size_t len, unsigned int flags)
2166{
2167	ssize_t ret;
2168
2169	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2170				     len, flags);
2171
2172	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2173		ret = generic_copy_file_range(src_file, src_off, dst_file,
2174					      dst_off, len, flags);
2175	return ret;
2176}
2177
2178const struct file_operations ceph_file_fops = {
2179	.open = ceph_open,
2180	.release = ceph_release,
2181	.llseek = ceph_llseek,
2182	.read_iter = ceph_read_iter,
2183	.write_iter = ceph_write_iter,
2184	.mmap = ceph_mmap,
2185	.fsync = ceph_fsync,
2186	.lock = ceph_lock,
2187	.flock = ceph_flock,
2188	.splice_read = generic_file_splice_read,
2189	.splice_write = iter_file_splice_write,
2190	.unlocked_ioctl = ceph_ioctl,
2191	.compat_ioctl	= ceph_ioctl,
2192	.fallocate	= ceph_fallocate,
2193	.copy_file_range = ceph_copy_file_range,
2194};