v4.10.11
   1#include <linux/ceph/ceph_debug.h>
   2
   3#include <linux/module.h>
   4#include <linux/sched.h>
   5#include <linux/slab.h>
   6#include <linux/file.h>
   7#include <linux/mount.h>
   8#include <linux/namei.h>
   9#include <linux/writeback.h>
  10#include <linux/falloc.h>
  11
  12#include "super.h"
  13#include "mds_client.h"
  14#include "cache.h"
  15
  16/*
  17 * Ceph file operations
  18 *
  19 * Implement basic open/close functionality, and implement
  20 * read/write.
  21 *
  22 * We implement three modes of file I/O:
  23 *  - buffered uses the generic_file_aio_{read,write} helpers
  24 *
  25 *  - synchronous is used when there is multi-client read/write
  26 *    sharing, avoids the page cache, and synchronously waits for an
  27 *    ack from the OSD.
  28 *
  29 *  - direct io takes the variant of the sync path that references
  30 *    user pages directly.
  31 *
  32 * fsync() flushes and waits on dirty pages, but just queues metadata
  33 * for writeback: since the MDS can recover size and mtime there is no
  34 * need to wait for MDS acknowledgement.
  35 */
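/*
 * (Editor's note) In this file those three modes map onto:
 * generic_file_read_iter()/generic_perform_write() for buffered I/O,
 * ceph_sync_read()/ceph_sync_write() for the synchronous path, and
 * ceph_direct_read_write() for O_DIRECT.
 */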
  36
  37/*
  38 * Calculate the length sum of direct io vectors that can
  39 * be combined into one page vector.
  40 */
  41static size_t dio_get_pagev_size(const struct iov_iter *it)
  42{
  43    const struct iovec *iov = it->iov;
  44    const struct iovec *iovend = iov + it->nr_segs;
  45    size_t size;
  46
  47    size = iov->iov_len - it->iov_offset;
  48    /*
  49     * An iov can be page vectored when both the current tail
  50     * and the next base are page aligned.
  51     */
  52    while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
  53           (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
  54        size += iov->iov_len;
  55    }
  56    dout("dio_get_pagevlen len = %zu\n", size);
  57    return size;
  58}
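/*
 * (Editor's note) Worked example with 4K pages and iov_offset 0, using
 * hypothetical addresses: for segments
 *   { .iov_base = (void *)0x10000, .iov_len = 0x3000 },  tail page aligned
 *   { .iov_base = (void *)0x40000, .iov_len = 0x1000 },  base page aligned
 *   { .iov_base = (void *)0x50123, .iov_len = 0x1000 },  base unaligned
 * the function returns 0x4000: the first two segments can be combined
 * into one page vector, while the third starts mid-page and stops the
 * merge.
 */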
  59
  60/*
  61 * Allocate a page vector based on (@it, @nbytes).
  62 * The return value is the tuple describing a page vector,
  63 * that is (@pages, @page_align, @num_pages).
  64 */
  65static struct page **
  66dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
  67		    size_t *page_align, int *num_pages)
  68{
  69	struct iov_iter tmp_it = *it;
  70	size_t align;
  71	struct page **pages;
  72	int ret = 0, idx, npages;
  73
  74	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
  75		(PAGE_SIZE - 1);
  76	npages = calc_pages_for(align, nbytes);
  77	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
  78	if (!pages) {
  79		pages = vmalloc(sizeof(*pages) * npages);
  80		if (!pages)
  81			return ERR_PTR(-ENOMEM);
  82	}
  83
  84	for (idx = 0; idx < npages; ) {
  85		size_t start;
  86		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
  87					 npages - idx, &start);
  88		if (ret < 0)
  89			goto fail;
  90
  91		iov_iter_advance(&tmp_it, ret);
  92		nbytes -= ret;
  93		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
  94	}
  95
  96	BUG_ON(nbytes != 0);
  97	*num_pages = npages;
  98	*page_align = align;
  99	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
 100	return pages;
 101fail:
 102	ceph_put_page_vector(pages, idx, false);
 103	return ERR_PTR(ret);
 104}
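/*
 * (Editor's note) Example with 4K pages: if iov_base + iov_offset ends
 * in 0x200 and nbytes is 0x2000, then align is 0x200 and
 * calc_pages_for(0x200, 0x2000) yields 3, because the span
 * [0x200, 0x2200) straddles three pages.  The kmalloc() with vmalloc()
 * fallback above is an open-coded kvmalloc(); the v6.2 code below uses
 * kvmalloc_array() for the same purpose.
 */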
 105
 106/*
 107 * Prepare an open request.  Preallocate ceph_cap to avoid an
 108 * inopportune ENOMEM later.
 109 */
 110static struct ceph_mds_request *
 111prepare_open_request(struct super_block *sb, int flags, int create_mode)
 112{
 113	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 114	struct ceph_mds_client *mdsc = fsc->mdsc;
 115	struct ceph_mds_request *req;
 116	int want_auth = USE_ANY_MDS;
 117	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 118
 119	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 120		want_auth = USE_AUTH_MDS;
 121
 122	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 123	if (IS_ERR(req))
 124		goto out;
 125	req->r_fmode = ceph_flags_to_mode(flags);
 126	req->r_args.open.flags = cpu_to_le32(flags);
 127	req->r_args.open.mode = cpu_to_le32(create_mode);
 128out:
 129	return req;
 130}
 131
 132/*
 133 * initialize private struct file data.
 134 * if we fail, clean up by dropping fmode reference on the ceph_inode
 135 */
 136static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 137{
 138	struct ceph_file_info *cf;
 139	int ret = 0;
 140
 141	switch (inode->i_mode & S_IFMT) {
 142	case S_IFREG:
 143		ceph_fscache_register_inode_cookie(inode);
  144		ceph_fscache_file_set_cookie(inode, file); /* fall through */
 145	case S_IFDIR:
 146		dout("init_file %p %p 0%o (regular)\n", inode, file,
 147		     inode->i_mode);
 148		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 149		if (cf == NULL) {
 150			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 151			return -ENOMEM;
 152		}
 153		cf->fmode = fmode;
 154		cf->next_offset = 2;
 155		cf->readdir_cache_idx = -1;
 156		file->private_data = cf;
 157		BUG_ON(inode->i_fop->release != ceph_release);
 158		break;
 159
 160	case S_IFLNK:
 161		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 162		     inode->i_mode);
 163		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 164		break;
 165
 166	default:
 167		dout("init_file %p %p 0%o (special)\n", inode, file,
 168		     inode->i_mode);
 169		/*
 170		 * we need to drop the open ref now, since we don't
 171		 * have .release set to ceph_release.
 172		 */
 173		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 174		BUG_ON(inode->i_fop->release == ceph_release);
 175
 176		/* call the proper open fop */
 177		ret = inode->i_fop->open(inode, file);
 178	}
 179	return ret;
 180}
 181
 182/*
 183 * try renew caps after session gets killed.
 184 */
 185int ceph_renew_caps(struct inode *inode)
 186{
 187	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 188	struct ceph_inode_info *ci = ceph_inode(inode);
 189	struct ceph_mds_request *req;
 190	int err, flags, wanted;
 191
 192	spin_lock(&ci->i_ceph_lock);
 193	wanted = __ceph_caps_file_wanted(ci);
 194	if (__ceph_is_any_real_caps(ci) &&
 195	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 196		int issued = __ceph_caps_issued(ci, NULL);
 197		spin_unlock(&ci->i_ceph_lock);
 198		dout("renew caps %p want %s issued %s updating mds_wanted\n",
 199		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 200		ceph_check_caps(ci, 0, NULL);
 201		return 0;
 202	}
 203	spin_unlock(&ci->i_ceph_lock);
 204
 205	flags = 0;
 206	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 207		flags = O_RDWR;
 208	else if (wanted & CEPH_CAP_FILE_RD)
 209		flags = O_RDONLY;
 210	else if (wanted & CEPH_CAP_FILE_WR)
 211		flags = O_WRONLY;
 212#ifdef O_LAZY
 213	if (wanted & CEPH_CAP_FILE_LAZYIO)
 214		flags |= O_LAZY;
 215#endif
 216
 217	req = prepare_open_request(inode->i_sb, flags, 0);
 218	if (IS_ERR(req)) {
 219		err = PTR_ERR(req);
 220		goto out;
 221	}
 222
 223	req->r_inode = inode;
 224	ihold(inode);
 225	req->r_num_caps = 1;
 226	req->r_fmode = -1;
 227
 228	err = ceph_mdsc_do_request(mdsc, NULL, req);
 229	ceph_mdsc_put_request(req);
 230out:
 231	dout("renew caps %p open result=%d\n", inode, err);
 232	return err < 0 ? err : 0;
 233}
 234
 235/*
 236 * If we already have the requisite capabilities, we can satisfy
 237 * the open request locally (no need to request new caps from the
 238 * MDS).  We do, however, need to inform the MDS (asynchronously)
 239 * if our wanted caps set expands.
 240 */
 241int ceph_open(struct inode *inode, struct file *file)
 242{
 243	struct ceph_inode_info *ci = ceph_inode(inode);
 244	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 245	struct ceph_mds_client *mdsc = fsc->mdsc;
 246	struct ceph_mds_request *req;
 247	struct ceph_file_info *cf = file->private_data;
 248	int err;
 249	int flags, fmode, wanted;
 250
 251	if (cf) {
 252		dout("open file %p is already opened\n", file);
 253		return 0;
 254	}
 255
 256	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 257	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 258	if (S_ISDIR(inode->i_mode))
 259		flags = O_DIRECTORY;  /* mds likes to know */
 260
 261	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 262	     ceph_vinop(inode), file, flags, file->f_flags);
 263	fmode = ceph_flags_to_mode(flags);
 264	wanted = ceph_caps_for_mode(fmode);
 265
 266	/* snapped files are read-only */
 267	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 268		return -EROFS;
 269
 270	/* trivially open snapdir */
 271	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 272		spin_lock(&ci->i_ceph_lock);
 273		__ceph_get_fmode(ci, fmode);
 274		spin_unlock(&ci->i_ceph_lock);
 275		return ceph_init_file(inode, file, fmode);
 276	}
 277
 278	/*
 279	 * No need to block if we have caps on the auth MDS (for
 280	 * write) or any MDS (for read).  Update wanted set
 281	 * asynchronously.
 282	 */
 283	spin_lock(&ci->i_ceph_lock);
 284	if (__ceph_is_any_real_caps(ci) &&
 285	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 286		int mds_wanted = __ceph_caps_mds_wanted(ci);
 287		int issued = __ceph_caps_issued(ci, NULL);
 288
 289		dout("open %p fmode %d want %s issued %s using existing\n",
 290		     inode, fmode, ceph_cap_string(wanted),
 291		     ceph_cap_string(issued));
 292		__ceph_get_fmode(ci, fmode);
 293		spin_unlock(&ci->i_ceph_lock);
 294
 295		/* adjust wanted? */
 296		if ((issued & wanted) != wanted &&
 297		    (mds_wanted & wanted) != wanted &&
 298		    ceph_snap(inode) != CEPH_SNAPDIR)
 299			ceph_check_caps(ci, 0, NULL);
 300
 301		return ceph_init_file(inode, file, fmode);
 302	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 303		   (ci->i_snap_caps & wanted) == wanted) {
 304		__ceph_get_fmode(ci, fmode);
 305		spin_unlock(&ci->i_ceph_lock);
 306		return ceph_init_file(inode, file, fmode);
 307	}
 308
 309	spin_unlock(&ci->i_ceph_lock);
 310
 311	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 312	req = prepare_open_request(inode->i_sb, flags, 0);
 313	if (IS_ERR(req)) {
 314		err = PTR_ERR(req);
 315		goto out;
 316	}
 317	req->r_inode = inode;
 318	ihold(inode);
 319
 320	req->r_num_caps = 1;
 321	err = ceph_mdsc_do_request(mdsc, NULL, req);
 322	if (!err)
 323		err = ceph_init_file(inode, file, req->r_fmode);
 324	ceph_mdsc_put_request(req);
 325	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 326out:
 327	return err;
 328}
 329
 330
 331/*
 332 * Do a lookup + open with a single request.  If we get a non-existent
 333 * file or symlink, return 1 so the VFS can retry.
 334 */
 335int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 336		     struct file *file, unsigned flags, umode_t mode,
 337		     int *opened)
 338{
 339	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 340	struct ceph_mds_client *mdsc = fsc->mdsc;
 341	struct ceph_mds_request *req;
 342	struct dentry *dn;
 343	struct ceph_acls_info acls = {};
 344	int mask;
 345	int err;
 346
 347	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 348	     dir, dentry, dentry,
 349	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 350
 351	if (dentry->d_name.len > NAME_MAX)
 352		return -ENAMETOOLONG;
 353
 354	if (flags & O_CREAT) {
 355		err = ceph_pre_init_acls(dir, &mode, &acls);
 356		if (err < 0)
 357			return err;
 358	}
 359
 360	/* do the open */
 361	req = prepare_open_request(dir->i_sb, flags, mode);
 362	if (IS_ERR(req)) {
 363		err = PTR_ERR(req);
 364		goto out_acl;
 365	}
 366	req->r_dentry = dget(dentry);
 367	req->r_num_caps = 2;
 368	if (flags & O_CREAT) {
 369		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
 370		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 371		if (acls.pagelist) {
 372			req->r_pagelist = acls.pagelist;
 373			acls.pagelist = NULL;
 374		}
 375	}
 376
 377	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
 378	if (ceph_security_xattr_wanted(dir))
 379		mask |= CEPH_CAP_XATTR_SHARED;
 380	req->r_args.open.mask = cpu_to_le32(mask);
 381
 382	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
 383	err = ceph_mdsc_do_request(mdsc,
 384				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 385				   req);
 386	err = ceph_handle_snapdir(req, dentry, err);
 387	if (err)
 388		goto out_req;
 389
 390	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 391		err = ceph_handle_notrace_create(dir, dentry);
 392
 393	if (d_in_lookup(dentry)) {
 394		dn = ceph_finish_lookup(req, dentry, err);
 395		if (IS_ERR(dn))
 396			err = PTR_ERR(dn);
 397	} else {
 398		/* we were given a hashed negative dentry */
 399		dn = NULL;
 400	}
 401	if (err)
 402		goto out_req;
 403	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 404		/* make vfs retry on splice, ENOENT, or symlink */
 405		dout("atomic_open finish_no_open on dn %p\n", dn);
 406		err = finish_no_open(file, dn);
 407	} else {
 408		dout("atomic_open finish_open on dn %p\n", dn);
 409		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 410			ceph_init_inode_acls(d_inode(dentry), &acls);
 411			*opened |= FILE_CREATED;
 412		}
 413		err = finish_open(file, dentry, ceph_open, opened);
 414	}
 415out_req:
 416	if (!req->r_err && req->r_target_inode)
 417		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
 418	ceph_mdsc_put_request(req);
 419out_acl:
 420	ceph_release_acls_info(&acls);
 421	dout("atomic_open result=%d\n", err);
 422	return err;
 423}
 424
 425int ceph_release(struct inode *inode, struct file *file)
 426{
 427	struct ceph_inode_info *ci = ceph_inode(inode);
 428	struct ceph_file_info *cf = file->private_data;
 429
 430	dout("release inode %p file %p\n", inode, file);
 431	ceph_put_fmode(ci, cf->fmode);
 432	if (cf->last_readdir)
 433		ceph_mdsc_put_request(cf->last_readdir);
 434	kfree(cf->last_name);
 435	kfree(cf->dir_info);
 436	kmem_cache_free(ceph_file_cachep, cf);
 437
 438	/* wake up anyone waiting for caps on this inode */
 439	wake_up_all(&ci->i_cap_wq);
 440	return 0;
 441}
 442
 443enum {
 444	HAVE_RETRIED = 1,
 445	CHECK_EOF =    2,
 446	READ_INLINE =  3,
 447};
 448
 449/*
 450 * Read a range of bytes striped over one or more objects.  Iterate over
 451 * objects we stripe over.  (That's not atomic, but good enough for now.)
 452 *
 453 * If we get a short result from the OSD, check against i_size; we need to
 454 * only return a short read to the caller if we hit EOF.
 455 */
 456static int striped_read(struct inode *inode,
 457			u64 pos, u64 len,
 458			struct page **pages, int num_pages,
 459			int page_align, int *checkeof)
 460{
 461	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 462	struct ceph_inode_info *ci = ceph_inode(inode);
 463	u64 this_len;
 464	loff_t i_size;
 465	int page_idx;
 466	int ret, read = 0;
 467	bool hit_stripe, was_short;
 468
 469	/*
 470	 * we may need to do multiple reads.  not atomic, unfortunately.
 471	 */
 472more:
 473	this_len = len;
 474	page_idx = (page_align + read) >> PAGE_SHIFT;
 475	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
 476				  &ci->i_layout, pos, &this_len,
 477				  ci->i_truncate_seq, ci->i_truncate_size,
 478				  pages + page_idx, num_pages - page_idx,
 479				  ((page_align + read) & ~PAGE_MASK));
 480	if (ret == -ENOENT)
 481		ret = 0;
 482	hit_stripe = this_len < len;
 483	was_short = ret >= 0 && ret < this_len;
 484	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
 485	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 486
 487	i_size = i_size_read(inode);
 488	if (ret >= 0) {
 489		if (was_short && (pos + ret < i_size)) {
 490			int zlen = min(this_len - ret, i_size - pos - ret);
 491			int zoff = page_align + read + ret;
 492			dout(" zero gap %llu to %llu\n",
 493			     pos + ret, pos + ret + zlen);
 494			ceph_zero_page_vector_range(zoff, zlen, pages);
 495			ret += zlen;
 496		}
 497
 498		read += ret;
 499		pos += ret;
 500		len -= ret;
 501
 502		/* hit stripe and need to continue */
 503		if (len && hit_stripe && pos < i_size)
 504			goto more;
 505	}
 506
 507	if (read > 0) {
 508		ret = read;
 509		/* did we bounce off eof? */
 510		if (pos + len > i_size)
 511			*checkeof = CHECK_EOF;
 512	}
 513
 514	dout("striped_read returns %d\n", ret);
 515	return ret;
 516}
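/*
 * (Editor's note) Example with the default layout (4M objects,
 * stripe_count 1): a 6M read at pos 0 is trimmed to this_len = 4M on
 * the first pass (hit_stripe), so we loop and read the remaining 2M
 * from the next object.  If that second read comes back short with 1M
 * while i_size is 5.5M, the gap up to i_size is zero-filled before
 * returning, and *checkeof is set because pos + len still extends past
 * i_size.
 */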
 517
 518/*
 519 * Completely synchronous read and write methods.  Direct from __user
 520 * buffer to osd, or directly to user pages (if O_DIRECT).
 521 *
 522 * If the read spans object boundary, just do multiple reads.
 523 */
 524static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 525			      int *checkeof)
 526{
 527	struct file *file = iocb->ki_filp;
 528	struct inode *inode = file_inode(file);
 529	struct page **pages;
 530	u64 off = iocb->ki_pos;
 531	int num_pages;
 532	ssize_t ret;
 533	size_t len = iov_iter_count(to);
 534
 535	dout("sync_read on file %p %llu~%u %s\n", file, off,
 536	     (unsigned)len,
 537	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 538
 539	if (!len)
 540		return 0;
 541	/*
 542	 * flush any page cache pages in this range.  this
 543	 * will make concurrent normal and sync io slow,
 544	 * but it will at least behave sensibly when they are
 545	 * in sequence.
 546	 */
 547	ret = filemap_write_and_wait_range(inode->i_mapping, off,
 548						off + len);
 549	if (ret < 0)
 550		return ret;
 551
 552	if (unlikely(to->type & ITER_PIPE)) {
 553		size_t page_off;
 554		ret = iov_iter_get_pages_alloc(to, &pages, len,
 555					       &page_off);
 556		if (ret <= 0)
 557			return -ENOMEM;
 558		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 559
 560		ret = striped_read(inode, off, ret, pages, num_pages,
 561				   page_off, checkeof);
 562		if (ret > 0) {
 563			iov_iter_advance(to, ret);
 564			off += ret;
 565		} else {
 566			iov_iter_advance(to, 0);
 567		}
 568		ceph_put_page_vector(pages, num_pages, false);
 569	} else {
 570		num_pages = calc_pages_for(off, len);
 571		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 572		if (IS_ERR(pages))
 573			return PTR_ERR(pages);
 574
 575		ret = striped_read(inode, off, len, pages, num_pages,
 576				   (off & ~PAGE_MASK), checkeof);
 577		if (ret > 0) {
 578			int l, k = 0;
 579			size_t left = ret;
 580
 581			while (left) {
 582				size_t page_off = off & ~PAGE_MASK;
 583				size_t copy = min_t(size_t, left,
 584						    PAGE_SIZE - page_off);
 585				l = copy_page_to_iter(pages[k++], page_off,
 586						      copy, to);
 587				off += l;
 588				left -= l;
 589				if (l < copy)
 590					break;
 591			}
 592		}
 593		ceph_release_page_vector(pages, num_pages);
 594	}
 595
 596	if (off > iocb->ki_pos) {
 597		ret = off - iocb->ki_pos;
 598		iocb->ki_pos = off;
 599	}
 600
 601	dout("sync_read result %zd\n", ret);
 602	return ret;
 603}
 604
 605struct ceph_aio_request {
 606	struct kiocb *iocb;
 607	size_t total_len;
 608	int write;
 609	int error;
 610	struct list_head osd_reqs;
 611	unsigned num_reqs;
 612	atomic_t pending_reqs;
 613	struct timespec mtime;
 614	struct ceph_cap_flush *prealloc_cf;
 615};
 616
 617struct ceph_aio_work {
 618	struct work_struct work;
 619	struct ceph_osd_request *req;
 620};
 621
 622static void ceph_aio_retry_work(struct work_struct *work);
 623
 624static void ceph_aio_complete(struct inode *inode,
 625			      struct ceph_aio_request *aio_req)
 626{
 627	struct ceph_inode_info *ci = ceph_inode(inode);
 628	int ret;
 629
 630	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 631		return;
 632
 633	ret = aio_req->error;
 634	if (!ret)
 635		ret = aio_req->total_len;
 636
 637	dout("ceph_aio_complete %p rc %d\n", inode, ret);
 638
 639	if (ret >= 0 && aio_req->write) {
 640		int dirty;
 641
 642		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
 643		if (endoff > i_size_read(inode)) {
 644			if (ceph_inode_set_size(inode, endoff))
 645				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 646		}
 647
 648		spin_lock(&ci->i_ceph_lock);
 649		ci->i_inline_version = CEPH_INLINE_NONE;
 650		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
 651					       &aio_req->prealloc_cf);
 652		spin_unlock(&ci->i_ceph_lock);
 653		if (dirty)
 654			__mark_inode_dirty(inode, dirty);
 655
 656	}
 657
 658	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 659						CEPH_CAP_FILE_RD));
 660
 661	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
 662
 663	ceph_free_cap_flush(aio_req->prealloc_cf);
 664	kfree(aio_req);
 665}
 666
 667static void ceph_aio_complete_req(struct ceph_osd_request *req)
 668{
 669	int rc = req->r_result;
 670	struct inode *inode = req->r_inode;
 671	struct ceph_aio_request *aio_req = req->r_priv;
 672	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 673	int num_pages = calc_pages_for((u64)osd_data->alignment,
 674				       osd_data->length);
 675
 676	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
 677	     inode, rc, osd_data->length);
 678
 679	if (rc == -EOLDSNAPC) {
 680		struct ceph_aio_work *aio_work;
 681		BUG_ON(!aio_req->write);
 682
 683		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 684		if (aio_work) {
 685			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 686			aio_work->req = req;
 687			queue_work(ceph_inode_to_client(inode)->wb_wq,
 688				   &aio_work->work);
 689			return;
 690		}
 691		rc = -ENOMEM;
 692	} else if (!aio_req->write) {
 693		if (rc == -ENOENT)
 694			rc = 0;
 695		if (rc >= 0 && osd_data->length > rc) {
 696			int zoff = osd_data->alignment + rc;
 697			int zlen = osd_data->length - rc;
 698			/*
 699			 * If the read is satisfied by a single OSD request,
 700			 * it may extend past EOF. Otherwise the read is
 701			 * within i_size.
 702			 */
 703			if (aio_req->num_reqs == 1) {
 704				loff_t i_size = i_size_read(inode);
 705				loff_t endoff = aio_req->iocb->ki_pos + rc;
 706				if (endoff < i_size)
 707					zlen = min_t(size_t, zlen,
 708						     i_size - endoff);
 709				aio_req->total_len = rc + zlen;
 710			}
 711
 712			if (zlen > 0)
 713				ceph_zero_page_vector_range(zoff, zlen,
 714							    osd_data->pages);
 715		}
 716	}
 717
 718	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
 719	ceph_osdc_put_request(req);
 720
 721	if (rc < 0)
 722		cmpxchg(&aio_req->error, 0, rc);
 723
 724	ceph_aio_complete(inode, aio_req);
 725	return;
 726}
 727
 728static void ceph_aio_retry_work(struct work_struct *work)
 729{
 730	struct ceph_aio_work *aio_work =
 731		container_of(work, struct ceph_aio_work, work);
 732	struct ceph_osd_request *orig_req = aio_work->req;
 733	struct ceph_aio_request *aio_req = orig_req->r_priv;
 734	struct inode *inode = orig_req->r_inode;
 735	struct ceph_inode_info *ci = ceph_inode(inode);
 736	struct ceph_snap_context *snapc;
 737	struct ceph_osd_request *req;
 738	int ret;
 739
 740	spin_lock(&ci->i_ceph_lock);
 741	if (__ceph_have_pending_cap_snap(ci)) {
 742		struct ceph_cap_snap *capsnap =
 743			list_last_entry(&ci->i_cap_snaps,
 744					struct ceph_cap_snap,
 745					ci_item);
 746		snapc = ceph_get_snap_context(capsnap->context);
 747	} else {
 748		BUG_ON(!ci->i_head_snapc);
 749		snapc = ceph_get_snap_context(ci->i_head_snapc);
 750	}
 751	spin_unlock(&ci->i_ceph_lock);
 752
 753	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
 754			false, GFP_NOFS);
 755	if (!req) {
 756		ret = -ENOMEM;
 757		req = orig_req;
 758		goto out;
 759	}
 760
 761	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
 762			CEPH_OSD_FLAG_ONDISK |
 763			CEPH_OSD_FLAG_WRITE;
 764	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
 765	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
 766
 767	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 768	if (ret) {
 769		ceph_osdc_put_request(req);
 770		req = orig_req;
 771		goto out;
 772	}
 773
 774	req->r_ops[0] = orig_req->r_ops[0];
 775	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 776
 777	req->r_mtime = aio_req->mtime;
 778	req->r_data_offset = req->r_ops[0].extent.offset;
 779
 780	ceph_osdc_put_request(orig_req);
 781
 782	req->r_callback = ceph_aio_complete_req;
 783	req->r_inode = inode;
 784	req->r_priv = aio_req;
 785
 786	ret = ceph_osdc_start_request(req->r_osdc, req, false);
 787out:
 788	if (ret < 0) {
 789		req->r_result = ret;
 790		ceph_aio_complete_req(req);
 791	}
 792
 793	ceph_put_snap_context(snapc);
 794	kfree(aio_work);
 795}
 796
 797/*
 798 * Write commit request unsafe callback, called to tell us when a
 799 * request is unsafe (that is, in flight--has been handed to the
 800 * messenger to send to its target osd).  It is called again when
 801 * we've received a response message indicating the request is
 802 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 803 * is completed early (and unsuccessfully) due to a timeout or
 804 * interrupt.
 805 *
 806 * This is used if we requested both an ACK and ONDISK commit reply
 807 * from the OSD.
 808 */
 809static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
 810{
 811	struct ceph_inode_info *ci = ceph_inode(req->r_inode);
 812
 813	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
 814		unsafe ? "un" : "");
 815	if (unsafe) {
 816		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
 817		spin_lock(&ci->i_unsafe_lock);
 818		list_add_tail(&req->r_unsafe_item,
 819			      &ci->i_unsafe_writes);
 820		spin_unlock(&ci->i_unsafe_lock);
 821
 822		complete_all(&req->r_completion);
 823	} else {
 824		spin_lock(&ci->i_unsafe_lock);
 825		list_del_init(&req->r_unsafe_item);
 826		spin_unlock(&ci->i_unsafe_lock);
 827		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
 828	}
 829}
 830
 831/*
 832 * Wait on any unsafe replies for the given inode.  First wait on the
 833 * newest request, and make that the upper bound.  Then, if there are
 834 * more requests, keep waiting on the oldest as long as it is still older
 835 * than the original request.
 836 */
 837void ceph_sync_write_wait(struct inode *inode)
 838{
 839	struct ceph_inode_info *ci = ceph_inode(inode);
 840	struct list_head *head = &ci->i_unsafe_writes;
 841	struct ceph_osd_request *req;
 842	u64 last_tid;
 843
 844	if (!S_ISREG(inode->i_mode))
 845		return;
 846
 847	spin_lock(&ci->i_unsafe_lock);
 848	if (list_empty(head))
 849		goto out;
 850
 851	/* set upper bound as _last_ entry in chain */
 852
 853	req = list_last_entry(head, struct ceph_osd_request,
 854			      r_unsafe_item);
 855	last_tid = req->r_tid;
 856
 857	do {
 858		ceph_osdc_get_request(req);
 859		spin_unlock(&ci->i_unsafe_lock);
 860
 861		dout("sync_write_wait on tid %llu (until %llu)\n",
 862		     req->r_tid, last_tid);
 863		wait_for_completion(&req->r_done_completion);
 864		ceph_osdc_put_request(req);
 865
 866		spin_lock(&ci->i_unsafe_lock);
 867		/*
 868		 * from here on look at first entry in chain, since we
 869		 * only want to wait for anything older than last_tid
 870		 */
 871		if (list_empty(head))
 872			break;
 873		req = list_first_entry(head, struct ceph_osd_request,
 874				       r_unsafe_item);
 875	} while (req->r_tid < last_tid);
 876out:
 877	spin_unlock(&ci->i_unsafe_lock);
 878}
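/*
 * (Editor's note) Example: with unsafe writes tid 5, 7 and 9 on the
 * list, we wait on tid 9 first to fix the upper bound, then wait on 5
 * and 7 from the head of the list in turn; an entry queued after tid 9
 * while we slept stops the loop.
 */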
 879
 880static ssize_t
 881ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 882		       struct ceph_snap_context *snapc,
 883		       struct ceph_cap_flush **pcf)
 884{
 885	struct file *file = iocb->ki_filp;
 886	struct inode *inode = file_inode(file);
 887	struct ceph_inode_info *ci = ceph_inode(inode);
 888	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 889	struct ceph_vino vino;
 890	struct ceph_osd_request *req;
 891	struct page **pages;
 892	struct ceph_aio_request *aio_req = NULL;
 893	int num_pages = 0;
 894	int flags;
 895	int ret;
 896	struct timespec mtime = current_time(inode);
 897	size_t count = iov_iter_count(iter);
 898	loff_t pos = iocb->ki_pos;
 899	bool write = iov_iter_rw(iter) == WRITE;
 900
 901	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 902		return -EROFS;
 903
 904	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
 905	     (write ? "write" : "read"), file, pos, (unsigned)count);
 906
 907	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
 908	if (ret < 0)
 909		return ret;
 910
 911	if (write) {
 912		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 913					pos >> PAGE_SHIFT,
 914					(pos + count) >> PAGE_SHIFT);
 915		if (ret2 < 0)
 916			dout("invalidate_inode_pages2_range returned %d\n", ret2);
 917
 918		flags = CEPH_OSD_FLAG_ORDERSNAP |
 919			CEPH_OSD_FLAG_ONDISK |
 920			CEPH_OSD_FLAG_WRITE;
 921	} else {
 922		flags = CEPH_OSD_FLAG_READ;
 923	}
 924
 925	while (iov_iter_count(iter) > 0) {
 926		u64 size = dio_get_pagev_size(iter);
 927		size_t start = 0;
 928		ssize_t len;
 929
 930		vino = ceph_vino(inode);
 931		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 932					    vino, pos, &size, 0,
 933					    /* include a 'startsync' command */
 934					    write ? 2 : 1,
 935					    write ? CEPH_OSD_OP_WRITE :
 936						    CEPH_OSD_OP_READ,
 937					    flags, snapc,
 938					    ci->i_truncate_seq,
 939					    ci->i_truncate_size,
 940					    false);
 941		if (IS_ERR(req)) {
 942			ret = PTR_ERR(req);
 943			break;
 944		}
 945
 946		len = size;
 947		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
 948		if (IS_ERR(pages)) {
 949			ceph_osdc_put_request(req);
 950			ret = PTR_ERR(pages);
 951			break;
 952		}
 953
 954		/*
 955		 * To simplify error handling, allow AIO only when the IO is
 956		 * within i_size or can be satisfied by a single OSD request.
 957		 */
 958		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
 959		    (len == count || pos + count <= i_size_read(inode))) {
 960			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
 961			if (aio_req) {
 962				aio_req->iocb = iocb;
 963				aio_req->write = write;
 964				INIT_LIST_HEAD(&aio_req->osd_reqs);
 965				if (write) {
 966					aio_req->mtime = mtime;
 967					swap(aio_req->prealloc_cf, *pcf);
 968				}
 969			}
 970			/* ignore error */
 971		}
 972
 973		if (write) {
 974			/*
 975			 * throw out any page cache pages in this range. this
 976			 * may block.
 977			 */
 978			truncate_inode_pages_range(inode->i_mapping, pos,
 979					(pos+len) | (PAGE_SIZE - 1));
 980
 981			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 982			req->r_mtime = mtime;
 983		}
 984
 985		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
 986						 false, false);
 987
 988		if (aio_req) {
 989			aio_req->total_len += len;
 990			aio_req->num_reqs++;
 991			atomic_inc(&aio_req->pending_reqs);
 992
 993			req->r_callback = ceph_aio_complete_req;
 994			req->r_inode = inode;
 995			req->r_priv = aio_req;
 996			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
 997
 998			pos += len;
 999			iov_iter_advance(iter, len);
1000			continue;
1001		}
1002
1003		ret = ceph_osdc_start_request(req->r_osdc, req, false);
1004		if (!ret)
1005			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1006
1007		size = i_size_read(inode);
1008		if (!write) {
1009			if (ret == -ENOENT)
1010				ret = 0;
1011			if (ret >= 0 && ret < len && pos + ret < size) {
1012				int zlen = min_t(size_t, len - ret,
1013						 size - pos - ret);
1014				ceph_zero_page_vector_range(start + ret, zlen,
1015							    pages);
1016				ret += zlen;
1017			}
1018			if (ret >= 0)
1019				len = ret;
1020		}
1021
1022		ceph_put_page_vector(pages, num_pages, !write);
1023
1024		ceph_osdc_put_request(req);
1025		if (ret < 0)
1026			break;
1027
1028		pos += len;
1029		iov_iter_advance(iter, len);
1030
1031		if (!write && pos >= size)
1032			break;
1033
1034		if (write && pos > size) {
1035			if (ceph_inode_set_size(inode, pos))
1036				ceph_check_caps(ceph_inode(inode),
1037						CHECK_CAPS_AUTHONLY,
1038						NULL);
1039		}
1040	}
1041
1042	if (aio_req) {
1043		LIST_HEAD(osd_reqs);
1044
1045		if (aio_req->num_reqs == 0) {
1046			kfree(aio_req);
1047			return ret;
1048		}
1049
1050		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1051					      CEPH_CAP_FILE_RD);
1052
1053		list_splice(&aio_req->osd_reqs, &osd_reqs);
1054		while (!list_empty(&osd_reqs)) {
1055			req = list_first_entry(&osd_reqs,
1056					       struct ceph_osd_request,
1057					       r_unsafe_item);
1058			list_del_init(&req->r_unsafe_item);
1059			if (ret >= 0)
1060				ret = ceph_osdc_start_request(req->r_osdc,
1061							      req, false);
1062			if (ret < 0) {
1063				req->r_result = ret;
1064				ceph_aio_complete_req(req);
1065			}
1066		}
1067		return -EIOCBQUEUED;
1068	}
1069
1070	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1071		ret = pos - iocb->ki_pos;
1072		iocb->ki_pos = pos;
1073	}
1074	return ret;
1075}
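/*
 * (Editor's note) Example: an O_DIRECT AIO write spanning two 4M
 * objects is split into two OSD requests by the loop above; both are
 * collected on aio_req->osd_reqs and only started at the end, and the
 * caller gets -EIOCBQUEUED while ceph_aio_complete_req() finishes the
 * iocb once pending_reqs drops to zero.
 */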
1076
1077/*
1078 * Synchronous write, straight from __user pointer or user pages.
1079 *
1080 * If write spans object boundary, just do multiple writes.  (For a
1081 * correct atomic write, we should e.g. take write locks on all
1082 * objects, rollback on failure, etc.)
1083 */
1084static ssize_t
1085ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1086		struct ceph_snap_context *snapc)
1087{
1088	struct file *file = iocb->ki_filp;
1089	struct inode *inode = file_inode(file);
1090	struct ceph_inode_info *ci = ceph_inode(inode);
1091	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1092	struct ceph_vino vino;
1093	struct ceph_osd_request *req;
1094	struct page **pages;
1095	u64 len;
1096	int num_pages;
1097	int written = 0;
1098	int flags;
1099	int check_caps = 0;
1100	int ret;
1101	struct timespec mtime = current_time(inode);
1102	size_t count = iov_iter_count(from);
1103
1104	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1105		return -EROFS;
1106
1107	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
1108
1109	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
1110	if (ret < 0)
1111		return ret;
1112
1113	ret = invalidate_inode_pages2_range(inode->i_mapping,
1114					    pos >> PAGE_SHIFT,
1115					    (pos + count) >> PAGE_SHIFT);
1116	if (ret < 0)
1117		dout("invalidate_inode_pages2_range returned %d\n", ret);
1118
1119	flags = CEPH_OSD_FLAG_ORDERSNAP |
1120		CEPH_OSD_FLAG_ONDISK |
1121		CEPH_OSD_FLAG_WRITE |
1122		CEPH_OSD_FLAG_ACK;
1123
1124	while ((len = iov_iter_count(from)) > 0) {
1125		size_t left;
1126		int n;
1127
1128		vino = ceph_vino(inode);
1129		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1130					    vino, pos, &len, 0, 1,
1131					    CEPH_OSD_OP_WRITE, flags, snapc,
1132					    ci->i_truncate_seq,
1133					    ci->i_truncate_size,
1134					    false);
1135		if (IS_ERR(req)) {
1136			ret = PTR_ERR(req);
1137			break;
1138		}
1139
1140		/*
1141		 * write from beginning of first page,
1142		 * regardless of io alignment
1143		 */
1144		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1145
1146		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1147		if (IS_ERR(pages)) {
1148			ret = PTR_ERR(pages);
1149			goto out;
1150		}
1151
1152		left = len;
1153		for (n = 0; n < num_pages; n++) {
1154			size_t plen = min_t(size_t, left, PAGE_SIZE);
1155			ret = copy_page_from_iter(pages[n], 0, plen, from);
1156			if (ret != plen) {
1157				ret = -EFAULT;
1158				break;
1159			}
1160			left -= ret;
1161		}
1162
1163		if (ret < 0) {
1164			ceph_release_page_vector(pages, num_pages);
1165			goto out;
1166		}
1167
1168		/* get a second commit callback */
1169		req->r_unsafe_callback = ceph_sync_write_unsafe;
1170		req->r_inode = inode;
1171
1172		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1173						false, true);
1174
1175		req->r_mtime = mtime;
1176		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1177		if (!ret)
1178			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1179
1180out:
1181		ceph_osdc_put_request(req);
1182		if (ret == 0) {
1183			pos += len;
1184			written += len;
1185
1186			if (pos > i_size_read(inode)) {
1187				check_caps = ceph_inode_set_size(inode, pos);
1188				if (check_caps)
1189					ceph_check_caps(ceph_inode(inode),
1190							CHECK_CAPS_AUTHONLY,
1191							NULL);
1192			}
1193		} else
1194			break;
1195	}
1196
1197	if (ret != -EOLDSNAPC && written > 0) {
1198		ret = written;
1199		iocb->ki_pos = pos;
1200	}
1201	return ret;
1202}
1203
1204/*
1205 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1206 * Atomically grab references, so that those bits are not released
1207 * back to the MDS mid-read.
1208 *
1209 * Hmm, the sync read case isn't actually async... should it be?
1210 */
1211static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1212{
1213	struct file *filp = iocb->ki_filp;
1214	struct ceph_file_info *fi = filp->private_data;
1215	size_t len = iov_iter_count(to);
1216	struct inode *inode = file_inode(filp);
1217	struct ceph_inode_info *ci = ceph_inode(inode);
1218	struct page *pinned_page = NULL;
1219	ssize_t ret;
1220	int want, got = 0;
1221	int retry_op = 0, read = 0;
1222
1223again:
1224	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1225	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1226
1227	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1228		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1229	else
1230		want = CEPH_CAP_FILE_CACHE;
1231	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1232	if (ret < 0)
1233		return ret;
1234
1235	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1236	    (iocb->ki_flags & IOCB_DIRECT) ||
1237	    (fi->flags & CEPH_F_SYNC)) {
1238
1239		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1240		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1241		     ceph_cap_string(got));
1242
1243		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1244			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1245				ret = ceph_direct_read_write(iocb, to,
1246							     NULL, NULL);
1247				if (ret >= 0 && ret < len)
1248					retry_op = CHECK_EOF;
1249			} else {
1250				ret = ceph_sync_read(iocb, to, &retry_op);
1251			}
1252		} else {
1253			retry_op = READ_INLINE;
1254		}
1255	} else {
1256		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1257		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1258		     ceph_cap_string(got));
1259		current->journal_info = filp;
1260		ret = generic_file_read_iter(iocb, to);
1261		current->journal_info = NULL;
1262	}
1263	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1264	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1265	if (pinned_page) {
1266		put_page(pinned_page);
1267		pinned_page = NULL;
1268	}
1269	ceph_put_cap_refs(ci, got);
1270	if (retry_op > HAVE_RETRIED && ret >= 0) {
1271		int statret;
1272		struct page *page = NULL;
1273		loff_t i_size;
1274		if (retry_op == READ_INLINE) {
1275			page = __page_cache_alloc(GFP_KERNEL);
1276			if (!page)
1277				return -ENOMEM;
1278		}
1279
1280		statret = __ceph_do_getattr(inode, page,
1281					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1282		if (statret < 0) {
1283			if (page)
1284				__free_page(page);
1285			if (statret == -ENODATA) {
1286				BUG_ON(retry_op != READ_INLINE);
1287				goto again;
1288			}
1289			return statret;
1290		}
1291
1292		i_size = i_size_read(inode);
1293		if (retry_op == READ_INLINE) {
1294			BUG_ON(ret > 0 || read > 0);
1295			if (iocb->ki_pos < i_size &&
1296			    iocb->ki_pos < PAGE_SIZE) {
1297				loff_t end = min_t(loff_t, i_size,
1298						   iocb->ki_pos + len);
1299				end = min_t(loff_t, end, PAGE_SIZE);
1300				if (statret < end)
1301					zero_user_segment(page, statret, end);
1302				ret = copy_page_to_iter(page,
1303						iocb->ki_pos & ~PAGE_MASK,
1304						end - iocb->ki_pos, to);
1305				iocb->ki_pos += ret;
1306				read += ret;
1307			}
1308			if (iocb->ki_pos < i_size && read < len) {
1309				size_t zlen = min_t(size_t, len - read,
1310						    i_size - iocb->ki_pos);
1311				ret = iov_iter_zero(zlen, to);
1312				iocb->ki_pos += ret;
1313				read += ret;
1314			}
1315			__free_pages(page, 0);
1316			return read;
1317		}
1318
1319		/* hit EOF or hole? */
1320		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1321		    ret < len) {
1322			dout("sync_read hit hole, ppos %lld < size %lld"
1323			     ", reading more\n", iocb->ki_pos, i_size);
1324
1325			read += ret;
1326			len -= ret;
1327			retry_op = HAVE_RETRIED;
1328			goto again;
1329		}
1330	}
1331
1332	if (ret >= 0)
1333		ret += read;
1334
1335	return ret;
1336}
1337
1338/*
1339 * Take cap references to avoid releasing caps to MDS mid-write.
1340 *
1341 * If we are synchronous, and write with an old snap context, the OSD
1342 * may return EOLDSNAPC.  In that case, retry the write.. _after_
1343 * dropping our cap refs and allowing the pending snap to logically
1344 * complete _before_ this write occurs.
1345 *
1346 * If we are near ENOSPC, write synchronously.
1347 */
1348static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1349{
1350	struct file *file = iocb->ki_filp;
1351	struct ceph_file_info *fi = file->private_data;
1352	struct inode *inode = file_inode(file);
1353	struct ceph_inode_info *ci = ceph_inode(inode);
1354	struct ceph_osd_client *osdc =
1355		&ceph_sb_to_client(inode->i_sb)->client->osdc;
1356	struct ceph_cap_flush *prealloc_cf;
1357	ssize_t count, written = 0;
1358	int err, want, got;
1359	loff_t pos;
1360
1361	if (ceph_snap(inode) != CEPH_NOSNAP)
1362		return -EROFS;
1363
1364	prealloc_cf = ceph_alloc_cap_flush();
1365	if (!prealloc_cf)
1366		return -ENOMEM;
1367
1368	inode_lock(inode);
1369
1370	/* We can write back this queue in page reclaim */
1371	current->backing_dev_info = inode_to_bdi(inode);
1372
1373	if (iocb->ki_flags & IOCB_APPEND) {
1374		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1375		if (err < 0)
1376			goto out;
1377	}
1378
1379	err = generic_write_checks(iocb, from);
1380	if (err <= 0)
1381		goto out;
1382
1383	pos = iocb->ki_pos;
1384	count = iov_iter_count(from);
1385	err = file_remove_privs(file);
1386	if (err)
1387		goto out;
1388
1389	err = file_update_time(file);
1390	if (err)
1391		goto out;
1392
1393	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1394		err = ceph_uninline_data(file, NULL);
1395		if (err < 0)
1396			goto out;
1397	}
1398
1399retry_snap:
1400	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
1401		err = -ENOSPC;
1402		goto out;
1403	}
1404
1405	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1406	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1407	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1408		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1409	else
1410		want = CEPH_CAP_FILE_BUFFER;
1411	got = 0;
1412	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1413			    &got, NULL);
1414	if (err < 0)
1415		goto out;
1416
1417	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1418	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1419
1420	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1421	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
1422		struct ceph_snap_context *snapc;
1423		struct iov_iter data;
1424		inode_unlock(inode);
1425
1426		spin_lock(&ci->i_ceph_lock);
1427		if (__ceph_have_pending_cap_snap(ci)) {
1428			struct ceph_cap_snap *capsnap =
1429					list_last_entry(&ci->i_cap_snaps,
1430							struct ceph_cap_snap,
1431							ci_item);
1432			snapc = ceph_get_snap_context(capsnap->context);
1433		} else {
1434			BUG_ON(!ci->i_head_snapc);
1435			snapc = ceph_get_snap_context(ci->i_head_snapc);
1436		}
1437		spin_unlock(&ci->i_ceph_lock);
1438
1439		/* we might need to revert back to that point */
1440		data = *from;
1441		if (iocb->ki_flags & IOCB_DIRECT)
1442			written = ceph_direct_read_write(iocb, &data, snapc,
1443							 &prealloc_cf);
1444		else
1445			written = ceph_sync_write(iocb, &data, pos, snapc);
1446		if (written == -EOLDSNAPC) {
1447			dout("aio_write %p %llx.%llx %llu~%u "
1448				"got EOLDSNAPC, retrying\n",
1449				inode, ceph_vinop(inode),
1450				pos, (unsigned)count);
1451			inode_lock(inode);
1452			goto retry_snap;
1453		}
1454		if (written > 0)
1455			iov_iter_advance(from, written);
1456		ceph_put_snap_context(snapc);
1457	} else {
1458		/*
1459		 * No need to acquire the i_truncate_mutex, because the
1460		 * MDS revokes Fwb caps before sending a truncate message
1461		 * to us.  We can't get the Fwb cap while there is a
1462		 * pending vmtruncate, so write and vmtruncate cannot run
1463		 * at the same time.
1464		 */
1465		written = generic_perform_write(file, from, pos);
1466		if (likely(written >= 0))
1467			iocb->ki_pos = pos + written;
1468		inode_unlock(inode);
1469	}
1470
1471	if (written >= 0) {
1472		int dirty;
1473		spin_lock(&ci->i_ceph_lock);
1474		ci->i_inline_version = CEPH_INLINE_NONE;
1475		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1476					       &prealloc_cf);
1477		spin_unlock(&ci->i_ceph_lock);
1478		if (dirty)
1479			__mark_inode_dirty(inode, dirty);
1480	}
1481
1482	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1483	     inode, ceph_vinop(inode), pos, (unsigned)count,
1484	     ceph_cap_string(got));
1485	ceph_put_cap_refs(ci, got);
1486
1487	if (written >= 0) {
1488		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
1489			iocb->ki_flags |= IOCB_DSYNC;
1490
1491		written = generic_write_sync(iocb, written);
1492	}
1493
1494	goto out_unlocked;
1495
1496out:
1497	inode_unlock(inode);
1498out_unlocked:
1499	ceph_free_cap_flush(prealloc_cf);
1500	current->backing_dev_info = NULL;
1501	return written ? written : err;
1502}
1503
1504/*
1505 * llseek.  be sure to verify file size on SEEK_END.
1506 */
1507static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1508{
1509	struct inode *inode = file->f_mapping->host;
1510	loff_t i_size;
1511	loff_t ret;
1512
1513	inode_lock(inode);
1514
1515	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1516		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1517		if (ret < 0)
1518			goto out;
1519	}
1520
1521	i_size = i_size_read(inode);
1522	switch (whence) {
1523	case SEEK_END:
1524		offset += i_size;
1525		break;
1526	case SEEK_CUR:
1527		/*
1528		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1529		 * position-querying operation.  Avoid rewriting the "same"
1530		 * f_pos value back to the file because a concurrent read(),
1531		 * write() or lseek() might have altered it
1532		 */
1533		if (offset == 0) {
1534			ret = file->f_pos;
1535			goto out;
1536		}
1537		offset += file->f_pos;
1538		break;
1539	case SEEK_DATA:
1540		if (offset >= i_size) {
1541			ret = -ENXIO;
1542			goto out;
1543		}
1544		break;
1545	case SEEK_HOLE:
1546		if (offset >= i_size) {
1547			ret = -ENXIO;
1548			goto out;
1549		}
1550		offset = i_size;
1551		break;
1552	}
1553
1554	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1555
1556out:
1557	inode_unlock(inode);
1558	return ret;
1559}
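/*
 * (Editor's note) SEEK_DATA/SEEK_HOLE above treat the file as a single
 * data extent: SEEK_DATA succeeds with the given offset whenever it is
 * below i_size, and SEEK_HOLE reports the only "hole" at i_size.
 */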
1560
1561static inline void ceph_zero_partial_page(
1562	struct inode *inode, loff_t offset, unsigned size)
1563{
1564	struct page *page;
1565	pgoff_t index = offset >> PAGE_SHIFT;
1566
1567	page = find_lock_page(inode->i_mapping, index);
1568	if (page) {
1569		wait_on_page_writeback(page);
1570		zero_user(page, offset & (PAGE_SIZE - 1), size);
1571		unlock_page(page);
1572		put_page(page);
1573	}
1574}
1575
1576static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1577				      loff_t length)
1578{
1579	loff_t nearly = round_up(offset, PAGE_SIZE);
1580	if (offset < nearly) {
1581		loff_t size = nearly - offset;
1582		if (length < size)
1583			size = length;
1584		ceph_zero_partial_page(inode, offset, size);
1585		offset += size;
1586		length -= size;
1587	}
1588	if (length >= PAGE_SIZE) {
1589		loff_t size = round_down(length, PAGE_SIZE);
1590		truncate_pagecache_range(inode, offset, offset + size - 1);
1591		offset += size;
1592		length -= size;
1593	}
1594	if (length)
1595		ceph_zero_partial_page(inode, offset, length);
1596}
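/*
 * (Editor's note) Example with 4K pages, offset 5000, length 20000:
 * first the 3192 bytes [5000, 8192) are zeroed within the partial
 * page, then the page-aligned 16384-byte middle is dropped with
 * truncate_pagecache_range(), and the remaining 424 bytes at 24576
 * are zeroed in the final partial page.
 */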
1597
1598static int ceph_zero_partial_object(struct inode *inode,
1599				    loff_t offset, loff_t *length)
1600{
1601	struct ceph_inode_info *ci = ceph_inode(inode);
1602	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1603	struct ceph_osd_request *req;
1604	int ret = 0;
1605	loff_t zero = 0;
1606	int op;
1607
1608	if (!length) {
1609		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1610		length = &zero;
1611	} else {
1612		op = CEPH_OSD_OP_ZERO;
1613	}
1614
1615	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1616					ceph_vino(inode),
1617					offset, length,
1618					0, 1, op,
1619					CEPH_OSD_FLAG_WRITE |
1620					CEPH_OSD_FLAG_ONDISK,
1621					NULL, 0, 0, false);
1622	if (IS_ERR(req)) {
1623		ret = PTR_ERR(req);
1624		goto out;
1625	}
1626
1627	req->r_mtime = inode->i_mtime;
1628	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1629	if (!ret) {
1630		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1631		if (ret == -ENOENT)
1632			ret = 0;
1633	}
1634	ceph_osdc_put_request(req);
1635
1636out:
1637	return ret;
1638}
1639
1640static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1641{
1642	int ret = 0;
1643	struct ceph_inode_info *ci = ceph_inode(inode);
1644	s32 stripe_unit = ci->i_layout.stripe_unit;
1645	s32 stripe_count = ci->i_layout.stripe_count;
1646	s32 object_size = ci->i_layout.object_size;
1647	u64 object_set_size = object_size * stripe_count;
1648	u64 nearly, t;
1649
1650	/* round offset up to next period boundary */
1651	nearly = offset + object_set_size - 1;
1652	t = nearly;
1653	nearly -= do_div(t, object_set_size);
1654
1655	while (length && offset < nearly) {
1656		loff_t size = length;
1657		ret = ceph_zero_partial_object(inode, offset, &size);
1658		if (ret < 0)
1659			return ret;
1660		offset += size;
1661		length -= size;
1662	}
1663	while (length >= object_set_size) {
1664		int i;
1665		loff_t pos = offset;
1666		for (i = 0; i < stripe_count; ++i) {
1667			ret = ceph_zero_partial_object(inode, pos, NULL);
1668			if (ret < 0)
1669				return ret;
1670			pos += stripe_unit;
1671		}
1672		offset += object_set_size;
1673		length -= object_set_size;
1674	}
1675	while (length) {
1676		loff_t size = length;
1677		ret = ceph_zero_partial_object(inode, offset, &size);
1678		if (ret < 0)
1679			return ret;
1680		offset += size;
1681		length -= size;
1682	}
1683	return ret;
1684}
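/*
 * (Editor's note) Example with the default layout (object_size 4M,
 * stripe_count 1, so object_set_size is 4M): punching [4M, 16M) skips
 * the first loop (4M is already a period boundary) and hands each of
 * the three whole 4M objects to ceph_zero_partial_object() with a NULL
 * length, which truncates or deletes the object instead of writing
 * zeroes.
 */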
1685
1686static long ceph_fallocate(struct file *file, int mode,
1687				loff_t offset, loff_t length)
1688{
1689	struct ceph_file_info *fi = file->private_data;
1690	struct inode *inode = file_inode(file);
1691	struct ceph_inode_info *ci = ceph_inode(inode);
1692	struct ceph_osd_client *osdc =
1693		&ceph_inode_to_client(inode)->client->osdc;
1694	struct ceph_cap_flush *prealloc_cf;
1695	int want, got = 0;
1696	int dirty;
1697	int ret = 0;
1698	loff_t endoff = 0;
1699	loff_t size;
1700
1701	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1702		return -EOPNOTSUPP;
1703
1704	if (!S_ISREG(inode->i_mode))
1705		return -EOPNOTSUPP;
1706
1707	prealloc_cf = ceph_alloc_cap_flush();
1708	if (!prealloc_cf)
1709		return -ENOMEM;
1710
1711	inode_lock(inode);
1712
1713	if (ceph_snap(inode) != CEPH_NOSNAP) {
1714		ret = -EROFS;
1715		goto unlock;
1716	}
1717
1718	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
1719	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
1720		ret = -ENOSPC;
1721		goto unlock;
1722	}
1723
1724	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1725		ret = ceph_uninline_data(file, NULL);
1726		if (ret < 0)
1727			goto unlock;
1728	}
1729
1730	size = i_size_read(inode);
1731	if (!(mode & FALLOC_FL_KEEP_SIZE))
1732		endoff = offset + length;
1733
1734	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1735		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1736	else
1737		want = CEPH_CAP_FILE_BUFFER;
1738
1739	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1740	if (ret < 0)
1741		goto unlock;
1742
1743	if (mode & FALLOC_FL_PUNCH_HOLE) {
1744		if (offset < size)
1745			ceph_zero_pagecache_range(inode, offset, length);
1746		ret = ceph_zero_objects(inode, offset, length);
1747	} else if (endoff > size) {
1748		truncate_pagecache_range(inode, size, -1);
1749		if (ceph_inode_set_size(inode, endoff))
1750			ceph_check_caps(ceph_inode(inode),
1751				CHECK_CAPS_AUTHONLY, NULL);
1752	}
1753
1754	if (!ret) {
1755		spin_lock(&ci->i_ceph_lock);
1756		ci->i_inline_version = CEPH_INLINE_NONE;
1757		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1758					       &prealloc_cf);
1759		spin_unlock(&ci->i_ceph_lock);
1760		if (dirty)
1761			__mark_inode_dirty(inode, dirty);
1762	}
1763
1764	ceph_put_cap_refs(ci, got);
1765unlock:
1766	inode_unlock(inode);
1767	ceph_free_cap_flush(prealloc_cf);
1768	return ret;
1769}
1770
1771const struct file_operations ceph_file_fops = {
1772	.open = ceph_open,
1773	.release = ceph_release,
1774	.llseek = ceph_llseek,
1775	.read_iter = ceph_read_iter,
1776	.write_iter = ceph_write_iter,
1777	.mmap = ceph_mmap,
1778	.fsync = ceph_fsync,
1779	.lock = ceph_lock,
1780	.flock = ceph_flock,
1781	.splice_read = generic_file_splice_read,
1782	.splice_write = iter_file_splice_write,
1783	.unlocked_ioctl = ceph_ioctl,
1784	.compat_ioctl	= ceph_ioctl,
1785	.fallocate	= ceph_fallocate,
1786};
1787
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3#include <linux/ceph/striper.h>
   4
   5#include <linux/module.h>
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/file.h>
   9#include <linux/mount.h>
  10#include <linux/namei.h>
  11#include <linux/writeback.h>
  12#include <linux/falloc.h>
  13#include <linux/iversion.h>
  14#include <linux/ktime.h>
  15
  16#include "super.h"
  17#include "mds_client.h"
  18#include "cache.h"
  19#include "io.h"
  20#include "metric.h"
  21
  22static __le32 ceph_flags_sys2wire(u32 flags)
  23{
  24	u32 wire_flags = 0;
  25
  26	switch (flags & O_ACCMODE) {
  27	case O_RDONLY:
  28		wire_flags |= CEPH_O_RDONLY;
  29		break;
  30	case O_WRONLY:
  31		wire_flags |= CEPH_O_WRONLY;
  32		break;
  33	case O_RDWR:
  34		wire_flags |= CEPH_O_RDWR;
  35		break;
  36	}
  37
  38	flags &= ~O_ACCMODE;
  39
  40#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
  41
  42	ceph_sys2wire(O_CREAT);
  43	ceph_sys2wire(O_EXCL);
  44	ceph_sys2wire(O_TRUNC);
  45	ceph_sys2wire(O_DIRECTORY);
  46	ceph_sys2wire(O_NOFOLLOW);
  47
  48#undef ceph_sys2wire
  49
  50	if (flags)
  51		dout("unused open flags: %x\n", flags);
  52
  53	return cpu_to_le32(wire_flags);
  54}
  55
  56/*
  57 * Ceph file operations
  58 *
  59 * Implement basic open/close functionality, and implement
  60 * read/write.
  61 *
  62 * We implement three modes of file I/O:
  63 *  - buffered uses the generic_file_aio_{read,write} helpers
  64 *
  65 *  - synchronous is used when there is multi-client read/write
  66 *    sharing, avoids the page cache, and synchronously waits for an
  67 *    ack from the OSD.
  68 *
  69 *  - direct io takes the variant of the sync path that references
  70 *    user pages directly.
  71 *
  72 * fsync() flushes and waits on dirty pages, but just queues metadata
  73 * for writeback: since the MDS can recover size and mtime there is no
  74 * need to wait for MDS acknowledgement.
  75 */
  76
  77/*
  78 * How many pages to get in one call to iov_iter_get_pages().  This
  79 * determines the size of the on-stack array used as a buffer.
  80 */
  81#define ITER_GET_BVECS_PAGES	64
  82
  83static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
  84				struct bio_vec *bvecs)
  85{
  86	size_t size = 0;
  87	int bvec_idx = 0;
  88
  89	if (maxsize > iov_iter_count(iter))
  90		maxsize = iov_iter_count(iter);
  91
  92	while (size < maxsize) {
  93		struct page *pages[ITER_GET_BVECS_PAGES];
  94		ssize_t bytes;
  95		size_t start;
  96		int idx = 0;
  97
  98		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
  99					   ITER_GET_BVECS_PAGES, &start);
 100		if (bytes < 0)
 101			return size ?: bytes;
 102
 103		size += bytes;
 104
 105		for ( ; bytes; idx++, bvec_idx++) {
 106			struct bio_vec bv = {
 107				.bv_page = pages[idx],
 108				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
 109				.bv_offset = start,
 110			};
 111
 112			bvecs[bvec_idx] = bv;
 113			bytes -= bv.bv_len;
 114			start = 0;
 115		}
 116	}
 117
 118	return size;
 119}
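    /*
     * Editor's note: an illustrative packing, assuming PAGE_SIZE == 4096.
     * If one iov_iter_get_pages2() call above pins 8704 bytes starting at
     * in-page offset 512, the inner loop emits three bio_vecs:
     *   { .bv_offset = 512, .bv_len = 3584 }   (rest of page 0)
     *   { .bv_offset = 0,   .bv_len = 4096 }   (all of page 1)
     *   { .bv_offset = 0,   .bv_len = 1024 }   (head of page 2)
     * Only the first bio_vec of each iov_iter_get_pages2() call can have
     * a non-zero offset, since start is reset to 0 after the first pass.
     */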
 120
 121/*
 122 * iov_iter_get_pages2() only considers one iov_iter segment, no matter
 123 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 124 * page.
 125 *
 126 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 127 * Return the number of bytes in the created bio_vec array, or an error.
 128 */
 129static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 130				    struct bio_vec **bvecs, int *num_bvecs)
 131{
 132	struct bio_vec *bv;
 133	size_t orig_count = iov_iter_count(iter);
 134	ssize_t bytes;
 135	int npages;
 136
 137	iov_iter_truncate(iter, maxsize);
 138	npages = iov_iter_npages(iter, INT_MAX);
 139	iov_iter_reexpand(iter, orig_count);
 140
 141	/*
 142	 * __iter_get_bvecs() may populate only part of the array -- zero it
 143	 * out.
 144	 */
 145	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
 146	if (!bv)
 147		return -ENOMEM;
 148
 149	bytes = __iter_get_bvecs(iter, maxsize, bv);
 150	if (bytes < 0) {
 151		/*
 152		 * No pages were pinned -- just free the array.
 153		 */
 154		kvfree(bv);
 155		return bytes;
 156	}
 157
 158	*bvecs = bv;
 159	*num_bvecs = npages;
 160	return bytes;
 161}
 162
 163static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 164{
 165	int i;
 166
 167	for (i = 0; i < num_bvecs; i++) {
 168		if (bvecs[i].bv_page) {
 169			if (should_dirty)
 170				set_page_dirty_lock(bvecs[i].bv_page);
 171			put_page(bvecs[i].bv_page);
 172		}
 173	}
 174	kvfree(bvecs);
 175}
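    /*
     * Editor's note: put_bvecs() is the teardown for
     * iter_get_bvecs_alloc(). Entries that __iter_get_bvecs() never
     * populated still have a NULL bv_page (the array was zeroed on
     * allocation) and are skipped. For reads into user memory
     * (should_dirty == true) each page is marked dirty before its pin is
     * dropped, so the newly read data is not discarded by reclaim.
     */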
 176
 177/*
 178 * Prepare an open request.  Preallocate ceph_cap to avoid an
 179 * inopportune ENOMEM later.
 180 */
 181static struct ceph_mds_request *
 182prepare_open_request(struct super_block *sb, int flags, int create_mode)
 183{
 184	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
 185	struct ceph_mds_request *req;
 186	int want_auth = USE_ANY_MDS;
 187	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 188
 189	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 190		want_auth = USE_AUTH_MDS;
 191
 192	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 193	if (IS_ERR(req))
 194		goto out;
 195	req->r_fmode = ceph_flags_to_mode(flags);
 196	req->r_args.open.flags = ceph_flags_sys2wire(flags);
 197	req->r_args.open.mode = cpu_to_le32(create_mode);
 198out:
 199	return req;
 200}
 201
 202static int ceph_init_file_info(struct inode *inode, struct file *file,
 203					int fmode, bool isdir)
 204{
 205	struct ceph_inode_info *ci = ceph_inode(inode);
 206	struct ceph_mount_options *opt =
 207		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
 208	struct ceph_file_info *fi;
 209	int ret;
 210
 211	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
 212			inode->i_mode, isdir ? "dir" : "regular");
 213	BUG_ON(inode->i_fop->release != ceph_release);
 214
 215	if (isdir) {
 216		struct ceph_dir_file_info *dfi =
 217			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 218		if (!dfi)
 219			return -ENOMEM;
 220
 221		file->private_data = dfi;
 222		fi = &dfi->file_info;
 223		dfi->next_offset = 2;
 224		dfi->readdir_cache_idx = -1;
 225	} else {
 226		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 227		if (!fi)
 228			return -ENOMEM;
 229
 230		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
 231			fi->flags |= CEPH_F_SYNC;
 232
 233		file->private_data = fi;
 234	}
 235
 236	ceph_get_fmode(ci, fmode, 1);
 237	fi->fmode = fmode;
 238
 239	spin_lock_init(&fi->rw_contexts_lock);
 240	INIT_LIST_HEAD(&fi->rw_contexts);
 241	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
 242
 243	if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
 244		ret = ceph_uninline_data(file);
 245		if (ret < 0)
 246			goto error;
 247	}
 248
 249	return 0;
 250
 251error:
 252	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
 253	ceph_put_fmode(ci, fi->fmode, 1);
 254	kmem_cache_free(ceph_file_cachep, fi);
 255	/* wake up anyone waiting for caps on this inode */
 256	wake_up_all(&ci->i_cap_wq);
 257	return ret;
 258}
 259
 260/*
 261 * initialize private struct file data.
 262 * if we fail, clean up by dropping fmode reference on the ceph_inode
 263 */
 264static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 265{
 266	int ret = 0;
 267
 268	switch (inode->i_mode & S_IFMT) {
 269	case S_IFREG:
 270		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
 271		fallthrough;
 272	case S_IFDIR:
 273		ret = ceph_init_file_info(inode, file, fmode,
 274						S_ISDIR(inode->i_mode));
 275		break;
 276
 277	case S_IFLNK:
 278		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 279		     inode->i_mode);
 280		break;
 281
 282	default:
 283		dout("init_file %p %p 0%o (special)\n", inode, file,
 284		     inode->i_mode);
 285		/*
 286		 * we need to drop the open ref now, since we don't
 287		 * have .release set to ceph_release.
 288		 */
 289		BUG_ON(inode->i_fop->release == ceph_release);
 290
 291		/* call the proper open fop */
 292		ret = inode->i_fop->open(inode, file);
 293	}
 294	return ret;
 295}
 296
 297/*
 298 * try renew caps after session gets killed.
 299 */
 300int ceph_renew_caps(struct inode *inode, int fmode)
 301{
 302	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
 303	struct ceph_inode_info *ci = ceph_inode(inode);
 304	struct ceph_mds_request *req;
 305	int err, flags, wanted;
 306
 307	spin_lock(&ci->i_ceph_lock);
 308	__ceph_touch_fmode(ci, mdsc, fmode);
 309	wanted = __ceph_caps_file_wanted(ci);
 310	if (__ceph_is_any_real_caps(ci) &&
 311	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 312		int issued = __ceph_caps_issued(ci, NULL);
 313		spin_unlock(&ci->i_ceph_lock);
 314		dout("renew caps %p want %s issued %s updating mds_wanted\n",
 315		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 316		ceph_check_caps(ci, 0);
 317		return 0;
 318	}
 319	spin_unlock(&ci->i_ceph_lock);
 320
 321	flags = 0;
 322	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 323		flags = O_RDWR;
 324	else if (wanted & CEPH_CAP_FILE_RD)
 325		flags = O_RDONLY;
 326	else if (wanted & CEPH_CAP_FILE_WR)
 327		flags = O_WRONLY;
 328#ifdef O_LAZY
 329	if (wanted & CEPH_CAP_FILE_LAZYIO)
 330		flags |= O_LAZY;
 331#endif
 332
 333	req = prepare_open_request(inode->i_sb, flags, 0);
 334	if (IS_ERR(req)) {
 335		err = PTR_ERR(req);
 336		goto out;
 337	}
 338
 339	req->r_inode = inode;
 340	ihold(inode);
 341	req->r_num_caps = 1;
 342
 343	err = ceph_mdsc_do_request(mdsc, NULL, req);
 344	ceph_mdsc_put_request(req);
 345out:
 346	dout("renew caps %p open result=%d\n", inode, err);
 347	return err < 0 ? err : 0;
 348}
 349
 350/*
 351 * If we already have the requisite capabilities, we can satisfy
 352 * the open request locally (no need to request new caps from the
 353 * MDS).  We do, however, need to inform the MDS (asynchronously)
 354 * if our wanted caps set expands.
 355 */
 356int ceph_open(struct inode *inode, struct file *file)
 357{
 358	struct ceph_inode_info *ci = ceph_inode(inode);
 359	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 360	struct ceph_mds_client *mdsc = fsc->mdsc;
 361	struct ceph_mds_request *req;
 362	struct ceph_file_info *fi = file->private_data;
 363	int err;
 364	int flags, fmode, wanted;
 365
 366	if (fi) {
 367		dout("open file %p is already open\n", file);
 368		return 0;
 369	}
 370
 371	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 372	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 373	if (S_ISDIR(inode->i_mode))
 374		flags = O_DIRECTORY;  /* mds likes to know */
 375
 376	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 377	     ceph_vinop(inode), file, flags, file->f_flags);
 378	fmode = ceph_flags_to_mode(flags);
 379	wanted = ceph_caps_for_mode(fmode);
 380
 381	/* snapped files are read-only */
 382	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 383		return -EROFS;
 384
 385	/* trivially open snapdir */
 386	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 387		return ceph_init_file(inode, file, fmode);
 388	}
 389
 390	/*
 391	 * No need to block if we have caps on the auth MDS (for
 392	 * write) or any MDS (for read).  Update wanted set
 393	 * asynchronously.
 394	 */
 395	spin_lock(&ci->i_ceph_lock);
 396	if (__ceph_is_any_real_caps(ci) &&
 397	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 398		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
 399		int issued = __ceph_caps_issued(ci, NULL);
 400
 401		dout("open %p fmode %d want %s issued %s using existing\n",
 402		     inode, fmode, ceph_cap_string(wanted),
 403		     ceph_cap_string(issued));
 404		__ceph_touch_fmode(ci, mdsc, fmode);
 405		spin_unlock(&ci->i_ceph_lock);
 406
 407		/* adjust wanted? */
 408		if ((issued & wanted) != wanted &&
 409		    (mds_wanted & wanted) != wanted &&
 410		    ceph_snap(inode) != CEPH_SNAPDIR)
 411			ceph_check_caps(ci, 0);
 412
 413		return ceph_init_file(inode, file, fmode);
 414	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 415		   (ci->i_snap_caps & wanted) == wanted) {
 416		__ceph_touch_fmode(ci, mdsc, fmode);
 417		spin_unlock(&ci->i_ceph_lock);
 418		return ceph_init_file(inode, file, fmode);
 419	}
 420
 421	spin_unlock(&ci->i_ceph_lock);
 422
 423	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 424	req = prepare_open_request(inode->i_sb, flags, 0);
 425	if (IS_ERR(req)) {
 426		err = PTR_ERR(req);
 427		goto out;
 428	}
 429	req->r_inode = inode;
 430	ihold(inode);
 431
 432	req->r_num_caps = 1;
 433	err = ceph_mdsc_do_request(mdsc, NULL, req);
 434	if (!err)
 435		err = ceph_init_file(inode, file, req->r_fmode);
 436	ceph_mdsc_put_request(req);
 437	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 438out:
 439	return err;
 440}
 441
 442/* Clone the layout from a synchronous create, if the dir now has Dc caps */
 443static void
 444cache_file_layout(struct inode *dst, struct inode *src)
 445{
 446	struct ceph_inode_info *cdst = ceph_inode(dst);
 447	struct ceph_inode_info *csrc = ceph_inode(src);
 448
 449	spin_lock(&cdst->i_ceph_lock);
 450	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
 451	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
 452		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
 453			sizeof(cdst->i_cached_layout));
 454		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
 455				   ceph_try_get_string(csrc->i_layout.pool_ns));
 456	}
 457	spin_unlock(&cdst->i_ceph_lock);
 458}
 459
 460/*
 461 * Try to set up an async create. We need caps, a file layout, an inode
 462 * number, and either a lease on the dentry or complete dir info. If any
 463 * of those criteria are not satisfied, return 0 and the caller can fall
 464 * back to a synchronous create.
 465 */
 466static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
 467				 struct ceph_file_layout *lo, u64 *pino)
 468{
 469	struct ceph_inode_info *ci = ceph_inode(dir);
 470	struct ceph_dentry_info *di = ceph_dentry(dentry);
 471	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
 472	u64 ino;
 473
 474	spin_lock(&ci->i_ceph_lock);
 475	/* No auth cap means no chance for Dc caps */
 476	if (!ci->i_auth_cap)
 477		goto no_async;
 478
 479	/* Any delegated inos? */
 480	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
 481		goto no_async;
 482
 483	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
 484		goto no_async;
 485
 486	if ((__ceph_caps_issued(ci, NULL) & want) != want)
 487		goto no_async;
 488
 489	if (d_in_lookup(dentry)) {
 490		if (!__ceph_dir_is_complete(ci))
 491			goto no_async;
 492		spin_lock(&dentry->d_lock);
 493		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
 494		spin_unlock(&dentry->d_lock);
 495	} else if (atomic_read(&ci->i_shared_gen) !=
 496		   READ_ONCE(di->lease_shared_gen)) {
 497		goto no_async;
 498	}
 499
 500	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
 501	if (!ino)
 502		goto no_async;
 503
 504	*pino = ino;
 505	ceph_take_cap_refs(ci, want, false);
 506	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
 507	rcu_assign_pointer(lo->pool_ns,
 508			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
 509	got = want;
 510no_async:
 511	spin_unlock(&ci->i_ceph_lock);
 512	return got;
 513}
 514
 515static void restore_deleg_ino(struct inode *dir, u64 ino)
 516{
 517	struct ceph_inode_info *ci = ceph_inode(dir);
 518	struct ceph_mds_session *s = NULL;
 519
 520	spin_lock(&ci->i_ceph_lock);
 521	if (ci->i_auth_cap)
 522		s = ceph_get_mds_session(ci->i_auth_cap->session);
 523	spin_unlock(&ci->i_ceph_lock);
 524	if (s) {
 525		int err = ceph_restore_deleg_ino(s, ino);
 526		if (err)
 527			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
 528				ino, err);
 529		ceph_put_mds_session(s);
 530	}
 531}
 532
 533static void wake_async_create_waiters(struct inode *inode,
 534				      struct ceph_mds_session *session)
 535{
 536	struct ceph_inode_info *ci = ceph_inode(inode);
 537	bool check_cap = false;
 538
 539	spin_lock(&ci->i_ceph_lock);
 540	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
 541		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
 542		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
 543
 544		if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
 545			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
 546			check_cap = true;
 547		}
 548	}
 549	ceph_kick_flushing_inode_caps(session, ci);
 550	spin_unlock(&ci->i_ceph_lock);
 551
 552	if (check_cap)
 553		ceph_check_caps(ci, CHECK_CAPS_FLUSH);
 554}
 555
 556static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
 557                                 struct ceph_mds_request *req)
 558{
 559	struct dentry *dentry = req->r_dentry;
 560	struct inode *dinode = d_inode(dentry);
 561	struct inode *tinode = req->r_target_inode;
 562	int result = req->r_err ? req->r_err :
 563			le32_to_cpu(req->r_reply_info.head->result);
 564
 565	WARN_ON_ONCE(dinode && tinode && dinode != tinode);
 566
 567	/* MDS changed -- caller must resubmit */
 568	if (result == -EJUKEBOX)
 569		goto out;
 570
 571	mapping_set_error(req->r_parent->i_mapping, result);
 572
 573	if (result) {
 574		int pathlen = 0;
 575		u64 base = 0;
 576		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
 577						  &base, 0);
 578
 579		pr_warn("async create failure path=(%llx)%s result=%d!\n",
 580			base, IS_ERR(path) ? "<<bad>>" : path, result);
 581		ceph_mdsc_free_path(path, pathlen);
 582
 583		ceph_dir_clear_complete(req->r_parent);
 584		if (!d_unhashed(dentry))
 585			d_drop(dentry);
 586
 587		if (dinode) {
 588			mapping_set_error(dinode->i_mapping, result);
 589			ceph_inode_shutdown(dinode);
 590			wake_async_create_waiters(dinode, req->r_session);
 591		}
 592	}
 593
 594	if (tinode) {
 595		u64 ino = ceph_vino(tinode).ino;
 596
 597		if (req->r_deleg_ino != ino)
 598			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
 599				__func__, req->r_err, req->r_deleg_ino, ino);
 600
 601		mapping_set_error(tinode->i_mapping, result);
 602		wake_async_create_waiters(tinode, req->r_session);
 603	} else if (!result) {
 604		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
 605			req->r_deleg_ino);
 606	}
 607out:
 608	ceph_mdsc_release_dir_caps(req);
 609}
 610
 611static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
 612				    struct file *file, umode_t mode,
 613				    struct ceph_mds_request *req,
 614				    struct ceph_acl_sec_ctx *as_ctx,
 615				    struct ceph_file_layout *lo)
 616{
 617	int ret;
 618	char xattr_buf[4];
 619	struct ceph_mds_reply_inode in = { };
 620	struct ceph_mds_reply_info_in iinfo = { .in = &in };
 621	struct ceph_inode_info *ci = ceph_inode(dir);
 622	struct ceph_dentry_info *di = ceph_dentry(dentry);
 623	struct inode *inode;
 624	struct timespec64 now;
 625	struct ceph_string *pool_ns;
 626	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
 627	struct ceph_vino vino = { .ino = req->r_deleg_ino,
 628				  .snap = CEPH_NOSNAP };
 629
 630	ktime_get_real_ts64(&now);
 631
 632	inode = ceph_get_inode(dentry->d_sb, vino);
 633	if (IS_ERR(inode))
 634		return PTR_ERR(inode);
 635
 636	iinfo.inline_version = CEPH_INLINE_NONE;
 637	iinfo.change_attr = 1;
 638	ceph_encode_timespec64(&iinfo.btime, &now);
 639
 640	if (req->r_pagelist) {
 641		iinfo.xattr_len = req->r_pagelist->length;
 642		iinfo.xattr_data = req->r_pagelist->mapped_tail;
 643	} else {
 644		/* fake it */
 645		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
 646		iinfo.xattr_data = xattr_buf;
 647		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
 648	}
 649
 650	in.ino = cpu_to_le64(vino.ino);
 651	in.snapid = cpu_to_le64(CEPH_NOSNAP);
 652	in.version = cpu_to_le64(1);	// ???
 653	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
 654	in.cap.cap_id = cpu_to_le64(1);
 655	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
 656	in.cap.flags = CEPH_CAP_FLAG_AUTH;
 657	in.ctime = in.mtime = in.atime = iinfo.btime;
 658	in.truncate_seq = cpu_to_le32(1);
 659	in.truncate_size = cpu_to_le64(-1ULL);
 660	in.xattr_version = cpu_to_le64(1);
 661	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
 662	if (dir->i_mode & S_ISGID) {
 663		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
 664
 665		/* Directories always inherit the setgid bit. */
 666		if (S_ISDIR(mode))
 667			mode |= S_ISGID;
 668	} else {
 669		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
 670	}
 671	in.mode = cpu_to_le32((u32)mode);
 672
 673	in.nlink = cpu_to_le32(1);
 674	in.max_size = cpu_to_le64(lo->stripe_unit);
 675
 676	ceph_file_layout_to_legacy(lo, &in.layout);
 677	/* lo is private, so pool_ns can't change */
 678	pool_ns = rcu_dereference_raw(lo->pool_ns);
 679	if (pool_ns) {
 680		iinfo.pool_ns_len = pool_ns->len;
 681		iinfo.pool_ns_data = pool_ns->str;
 682	}
 683
 684	down_read(&mdsc->snap_rwsem);
 685	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
 686			      req->r_fmode, NULL);
 687	up_read(&mdsc->snap_rwsem);
 688	if (ret) {
 689		dout("%s failed to fill inode: %d\n", __func__, ret);
 690		ceph_dir_clear_complete(dir);
 691		if (!d_unhashed(dentry))
 692			d_drop(dentry);
 693		if (inode->i_state & I_NEW)
 694			discard_new_inode(inode);
 695	} else {
 696		struct dentry *dn;
 697
 698		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
 699			vino.ino, ceph_ino(dir), dentry->d_name.name);
 700		ceph_dir_clear_ordered(dir);
 701		ceph_init_inode_acls(inode, as_ctx);
 702		if (inode->i_state & I_NEW) {
 703			/*
 704			 * If it's not I_NEW, then someone created this before
 705			 * we got here. Assume the server is aware of it at
 706			 * that point and don't worry about setting
 707			 * CEPH_I_ASYNC_CREATE.
 708			 */
 709			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
 710			unlock_new_inode(inode);
 711		}
 712		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
 713			if (!d_unhashed(dentry))
 714				d_drop(dentry);
 715			dn = d_splice_alias(inode, dentry);
 716			WARN_ON_ONCE(dn && dn != dentry);
 717		}
 718		file->f_mode |= FMODE_CREATED;
 719		ret = finish_open(file, dentry, ceph_open);
 720	}
 721
 722	spin_lock(&dentry->d_lock);
 723	di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
 724	wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
 725	spin_unlock(&dentry->d_lock);
 726
 727	return ret;
 728}
 729
 730/*
 731 * Do a lookup + open with a single request.  If we get a non-existent
 732 * file or symlink, return 1 so the VFS can retry.
 733 */
 734int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 735		     struct file *file, unsigned flags, umode_t mode)
 736{
 737	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 738	struct ceph_mds_client *mdsc = fsc->mdsc;
 739	struct ceph_mds_request *req;
 740	struct dentry *dn;
 741	struct ceph_acl_sec_ctx as_ctx = {};
 742	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
 743	int mask;
 744	int err;
 745
 746	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 747	     dir, dentry, dentry,
 748	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 749
 750	if (dentry->d_name.len > NAME_MAX)
 751		return -ENAMETOOLONG;
 752
 753	err = ceph_wait_on_conflict_unlink(dentry);
 754	if (err)
 755		return err;
 756	/*
 757	 * Do not truncate the file, since atomic_open is called before the
 758	 * permission check. The caller will do the truncation afterward.
 759	 */
 760	flags &= ~O_TRUNC;
 761
 762	if (flags & O_CREAT) {
 763		if (ceph_quota_is_max_files_exceeded(dir))
 764			return -EDQUOT;
 765		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
 766		if (err < 0)
 767			return err;
 768		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
 769		if (err < 0)
 770			goto out_ctx;
 771		/* Async create can't handle more than a page of xattrs */
 772		if (as_ctx.pagelist &&
 773		    !list_is_singular(&as_ctx.pagelist->head))
 774			try_async = false;
 775	} else if (!d_in_lookup(dentry)) {
 776		/* If it's not being looked up, it's negative */
 777		return -ENOENT;
 778	}
 779retry:
 780	/* do the open */
 781	req = prepare_open_request(dir->i_sb, flags, mode);
 782	if (IS_ERR(req)) {
 783		err = PTR_ERR(req);
 784		goto out_ctx;
 785	}
 786	req->r_dentry = dget(dentry);
 787	req->r_num_caps = 2;
 788	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
 789	if (ceph_security_xattr_wanted(dir))
 790		mask |= CEPH_CAP_XATTR_SHARED;
 791	req->r_args.open.mask = cpu_to_le32(mask);
 792	req->r_parent = dir;
 793	ihold(dir);
 794
 795	if (flags & O_CREAT) {
 796		struct ceph_file_layout lo;
 797
 798		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
 799		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 800		if (as_ctx.pagelist) {
 801			req->r_pagelist = as_ctx.pagelist;
 802			as_ctx.pagelist = NULL;
 803		}
 804		if (try_async &&
 805		    (req->r_dir_caps =
 806		      try_prep_async_create(dir, dentry, &lo,
 807					    &req->r_deleg_ino))) {
 808			struct ceph_dentry_info *di = ceph_dentry(dentry);
 809
 810			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
 811			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
 812			req->r_callback = ceph_async_create_cb;
 813
 814			spin_lock(&dentry->d_lock);
 815			di->flags |= CEPH_DENTRY_ASYNC_CREATE;
 816			spin_unlock(&dentry->d_lock);
 817
 818			err = ceph_mdsc_submit_request(mdsc, dir, req);
 819			if (!err) {
 820				err = ceph_finish_async_create(dir, dentry,
 821							file, mode, req,
 822							&as_ctx, &lo);
 823			} else if (err == -EJUKEBOX) {
 824				restore_deleg_ino(dir, req->r_deleg_ino);
 825				ceph_mdsc_put_request(req);
 826				try_async = false;
 827				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
 828				goto retry;
 829			}
 830			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
 831			goto out_req;
 832		}
 833	}
 834
 835	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 836	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
 837	if (err == -ENOENT) {
 838		dentry = ceph_handle_snapdir(req, dentry);
 839		if (IS_ERR(dentry)) {
 840			err = PTR_ERR(dentry);
 841			goto out_req;
 842		}
 843		err = 0;
 844	}
 845
 846	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 847		err = ceph_handle_notrace_create(dir, dentry);
 848
 849	if (d_in_lookup(dentry)) {
 850		dn = ceph_finish_lookup(req, dentry, err);
 851		if (IS_ERR(dn))
 852			err = PTR_ERR(dn);
 853	} else {
 854		/* we were given a hashed negative dentry */
 855		dn = NULL;
 856	}
 857	if (err)
 858		goto out_req;
 859	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 860		/* make vfs retry on splice, ENOENT, or symlink */
 861		dout("atomic_open finish_no_open on dn %p\n", dn);
 862		err = finish_no_open(file, dn);
 863	} else {
 864		dout("atomic_open finish_open on dn %p\n", dn);
 865		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 866			struct inode *newino = d_inode(dentry);
 867
 868			cache_file_layout(dir, newino);
 869			ceph_init_inode_acls(newino, &as_ctx);
 870			file->f_mode |= FMODE_CREATED;
 871		}
 872		err = finish_open(file, dentry, ceph_open);
 873	}
 874out_req:
 875	ceph_mdsc_put_request(req);
 876out_ctx:
 877	ceph_release_acl_sec_ctx(&as_ctx);
 878	dout("atomic_open result=%d\n", err);
 879	return err;
 880}
 881
 882int ceph_release(struct inode *inode, struct file *file)
 883{
 884	struct ceph_inode_info *ci = ceph_inode(inode);
 885
 886	if (S_ISDIR(inode->i_mode)) {
 887		struct ceph_dir_file_info *dfi = file->private_data;
 888		dout("release inode %p dir file %p\n", inode, file);
 889		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 890
 891		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
 892
 893		if (dfi->last_readdir)
 894			ceph_mdsc_put_request(dfi->last_readdir);
 895		kfree(dfi->last_name);
 896		kfree(dfi->dir_info);
 897		kmem_cache_free(ceph_dir_file_cachep, dfi);
 898	} else {
 899		struct ceph_file_info *fi = file->private_data;
 900		dout("release inode %p regular file %p\n", inode, file);
 901		WARN_ON(!list_empty(&fi->rw_contexts));
 902
 903		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
 904		ceph_put_fmode(ci, fi->fmode, 1);
 905
 906		kmem_cache_free(ceph_file_cachep, fi);
 907	}
 908
 909	/* wake up anyone waiting for caps on this inode */
 910	wake_up_all(&ci->i_cap_wq);
 911	return 0;
 912}
 913
 914enum {
 915	HAVE_RETRIED = 1,
 916	CHECK_EOF =    2,
 917	READ_INLINE =  3,
 918};
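    /*
     * Editor's note on how ceph_read_iter() (below) uses these values:
     * both CHECK_EOF and READ_INLINE compare greater than HAVE_RETRIED,
     * so either one triggers the retry block after a read attempt.
     * CHECK_EOF re-issues the read when a short result turned out to be
     * a hole rather than EOF, downgrading the state to HAVE_RETRIED for
     * the next pass; READ_INLINE instead copies inline data fetched via
     * __ceph_do_getattr().
     */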
 919
 920/*
 921 * Completely synchronous read and write methods.  Direct from __user
 922 * buffer to osd, or directly to user pages (if O_DIRECT).
 923 *
 924 * If the read spans an object boundary, just do multiple reads.  (That's
 925 * not atomic, but good enough for now.)
 926 *
 927 * If we get a short result from the OSD, check against i_size; we should
 928 * only return a short read to the caller if we hit EOF.
 929 */
 930static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 931			      int *retry_op)
 932{
 933	struct file *file = iocb->ki_filp;
 934	struct inode *inode = file_inode(file);
 935	struct ceph_inode_info *ci = ceph_inode(inode);
 936	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 937	struct ceph_osd_client *osdc = &fsc->client->osdc;
 938	ssize_t ret;
 939	u64 off = iocb->ki_pos;
 940	u64 len = iov_iter_count(to);
 941	u64 i_size = i_size_read(inode);
 942
 943	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
 944	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 945
 946	if (!len)
 947		return 0;
 948	/*
 949	 * flush any page cache pages in this range.  this
 950	 * will make concurrent normal and sync io slow,
 951	 * but it will at least behave sensibly when they are
 952	 * in sequence.
 953	 */
 954	ret = filemap_write_and_wait_range(inode->i_mapping,
 955					   off, off + len - 1);
 956	if (ret < 0)
 957		return ret;
 958
 959	ret = 0;
 960	while ((len = iov_iter_count(to)) > 0) {
 961		struct ceph_osd_request *req;
 962		struct page **pages;
 963		int num_pages;
 964		size_t page_off;
 965		bool more;
 966		int idx;
 967		size_t left;
 968
 969		req = ceph_osdc_new_request(osdc, &ci->i_layout,
 970					ci->i_vino, off, &len, 0, 1,
 971					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 972					NULL, ci->i_truncate_seq,
 973					ci->i_truncate_size, false);
 974		if (IS_ERR(req)) {
 975			ret = PTR_ERR(req);
 976			break;
 977		}
 978
 979		more = len < iov_iter_count(to);
 980
 981		num_pages = calc_pages_for(off, len);
 982		page_off = off & ~PAGE_MASK;
 983		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 984		if (IS_ERR(pages)) {
 985			ceph_osdc_put_request(req);
 986			ret = PTR_ERR(pages);
 987			break;
 988		}
 989
 990		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
 991						 false, false);
 992		ceph_osdc_start_request(osdc, req);
 993		ret = ceph_osdc_wait_request(osdc, req);
 994
 995		ceph_update_read_metrics(&fsc->mdsc->metric,
 996					 req->r_start_latency,
 997					 req->r_end_latency,
 998					 len, ret);
 999
1000		ceph_osdc_put_request(req);
1001
1002		i_size = i_size_read(inode);
1003		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
1004		     off, len, ret, i_size, (more ? " MORE" : ""));
1005
1006		if (ret == -ENOENT)
1007			ret = 0;
1008		if (ret >= 0 && ret < len && (off + ret < i_size)) {
1009			int zlen = min(len - ret, i_size - off - ret);
1010			int zoff = page_off + ret;
1011			dout("sync_read zero gap %llu~%llu\n",
1012		     off + ret, off + ret + zlen);
1013			ceph_zero_page_vector_range(zoff, zlen, pages);
1014			ret += zlen;
1015		}
1016
1017		idx = 0;
1018		left = ret > 0 ? ret : 0;
1019		while (left > 0) {
1020			size_t len, copied;
1021			page_off = off & ~PAGE_MASK;
1022			len = min_t(size_t, left, PAGE_SIZE - page_off);
1023			SetPageUptodate(pages[idx]);
1024			copied = copy_page_to_iter(pages[idx++],
1025						   page_off, len, to);
1026			off += copied;
1027			left -= copied;
1028			if (copied < len) {
1029				ret = -EFAULT;
1030				break;
1031			}
1032		}
1033		ceph_release_page_vector(pages, num_pages);
1034
1035		if (ret < 0) {
1036			if (ret == -EBLOCKLISTED)
1037				fsc->blocklisted = true;
1038			break;
1039		}
1040
1041		if (off >= i_size || !more)
1042			break;
1043	}
1044
1045	if (off > iocb->ki_pos) {
1046		if (off >= i_size) {
1047			*retry_op = CHECK_EOF;
1048			ret = i_size - iocb->ki_pos;
1049			iocb->ki_pos = i_size;
1050		} else {
1051			ret = off - iocb->ki_pos;
1052			iocb->ki_pos = off;
1053		}
1054	}
1055
1056	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
1057	return ret;
1058}
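    /*
     * Editor's note: a worked short-read example for the zero-fill above,
     * with assumed numbers. Reading 8192 bytes at offset 0 when the
     * backing object holds only 4096 bytes but i_size is 6144: the OSD
     * returns ret == 4096, so zlen = min(8192 - 4096, 6144 - 4096) = 2048
     * and the caller sees 6144 bytes, the last 2048 zeroed. Bytes beyond
     * i_size are never fabricated; the loop stops and the short count is
     * returned with retry_op set to CHECK_EOF.
     */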
1059
1060struct ceph_aio_request {
1061	struct kiocb *iocb;
1062	size_t total_len;
1063	bool write;
1064	bool should_dirty;
1065	int error;
1066	struct list_head osd_reqs;
1067	unsigned num_reqs;
1068	atomic_t pending_reqs;
1069	struct timespec64 mtime;
1070	struct ceph_cap_flush *prealloc_cf;
1071};
1072
1073struct ceph_aio_work {
1074	struct work_struct work;
1075	struct ceph_osd_request *req;
1076};
1077
1078static void ceph_aio_retry_work(struct work_struct *work);
1079
1080static void ceph_aio_complete(struct inode *inode,
1081			      struct ceph_aio_request *aio_req)
1082{
1083	struct ceph_inode_info *ci = ceph_inode(inode);
1084	int ret;
1085
1086	if (!atomic_dec_and_test(&aio_req->pending_reqs))
1087		return;
1088
1089	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1090		inode_dio_end(inode);
1091
1092	ret = aio_req->error;
1093	if (!ret)
1094		ret = aio_req->total_len;
1095
1096	dout("ceph_aio_complete %p rc %d\n", inode, ret);
1097
1098	if (ret >= 0 && aio_req->write) {
1099		int dirty;
1100
1101		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1102		if (endoff > i_size_read(inode)) {
1103			if (ceph_inode_set_size(inode, endoff))
1104				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
1105		}
1106
1107		spin_lock(&ci->i_ceph_lock);
1108		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1109					       &aio_req->prealloc_cf);
1110		spin_unlock(&ci->i_ceph_lock);
1111		if (dirty)
1112			__mark_inode_dirty(inode, dirty);
1113
1114	}
1115
1116	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1117						CEPH_CAP_FILE_RD));
1118
1119	aio_req->iocb->ki_complete(aio_req->iocb, ret);
1120
1121	ceph_free_cap_flush(aio_req->prealloc_cf);
1122	kfree(aio_req);
1123}
1124
1125static void ceph_aio_complete_req(struct ceph_osd_request *req)
1126{
1127	int rc = req->r_result;
1128	struct inode *inode = req->r_inode;
1129	struct ceph_aio_request *aio_req = req->r_priv;
1130	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1131	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
1132	unsigned int len = osd_data->bvec_pos.iter.bi_size;
1133
1134	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1135	BUG_ON(!osd_data->num_bvecs);
1136
1137	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
1138
1139	if (rc == -EOLDSNAPC) {
1140		struct ceph_aio_work *aio_work;
1141		BUG_ON(!aio_req->write);
1142
1143		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1144		if (aio_work) {
1145			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1146			aio_work->req = req;
1147			queue_work(ceph_inode_to_client(inode)->inode_wq,
1148				   &aio_work->work);
1149			return;
1150		}
1151		rc = -ENOMEM;
1152	} else if (!aio_req->write) {
1153		if (rc == -ENOENT)
1154			rc = 0;
1155		if (rc >= 0 && len > rc) {
1156			struct iov_iter i;
1157			int zlen = len - rc;
1158
1159			/*
1160			 * If the read is satisfied by a single OSD request,
1161			 * it may extend past EOF. Otherwise the read is
1162			 * within i_size.
1163			 */
1164			if (aio_req->num_reqs == 1) {
1165				loff_t i_size = i_size_read(inode);
1166				loff_t endoff = aio_req->iocb->ki_pos + rc;
1167				if (endoff < i_size)
1168					zlen = min_t(size_t, zlen,
1169						     i_size - endoff);
1170				aio_req->total_len = rc + zlen;
1171			}
1172
1173			iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
1174				      osd_data->num_bvecs, len);
1175			iov_iter_advance(&i, rc);
1176			iov_iter_zero(zlen, &i);
1177		}
1178	}
1179
1180	/* r_start_latency == 0 means the request was not submitted */
1181	if (req->r_start_latency) {
1182		if (aio_req->write)
1183			ceph_update_write_metrics(metric, req->r_start_latency,
1184						  req->r_end_latency, len, rc);
1185		else
1186			ceph_update_read_metrics(metric, req->r_start_latency,
1187						 req->r_end_latency, len, rc);
1188	}
1189
1190	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1191		  aio_req->should_dirty);
1192	ceph_osdc_put_request(req);
1193
1194	if (rc < 0)
1195		cmpxchg(&aio_req->error, 0, rc);
1196
1197	ceph_aio_complete(inode, aio_req);
1198	return;
1199}
1200
1201static void ceph_aio_retry_work(struct work_struct *work)
1202{
1203	struct ceph_aio_work *aio_work =
1204		container_of(work, struct ceph_aio_work, work);
1205	struct ceph_osd_request *orig_req = aio_work->req;
1206	struct ceph_aio_request *aio_req = orig_req->r_priv;
1207	struct inode *inode = orig_req->r_inode;
1208	struct ceph_inode_info *ci = ceph_inode(inode);
1209	struct ceph_snap_context *snapc;
1210	struct ceph_osd_request *req;
1211	int ret;
1212
1213	spin_lock(&ci->i_ceph_lock);
1214	if (__ceph_have_pending_cap_snap(ci)) {
1215		struct ceph_cap_snap *capsnap =
1216			list_last_entry(&ci->i_cap_snaps,
1217					struct ceph_cap_snap,
1218					ci_item);
1219		snapc = ceph_get_snap_context(capsnap->context);
1220	} else {
1221		BUG_ON(!ci->i_head_snapc);
1222		snapc = ceph_get_snap_context(ci->i_head_snapc);
1223	}
1224	spin_unlock(&ci->i_ceph_lock);
1225
1226	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1227			false, GFP_NOFS);
1228	if (!req) {
1229		ret = -ENOMEM;
1230		req = orig_req;
1231		goto out;
1232	}
1233
1234	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1235	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1236	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1237
1238	req->r_ops[0] = orig_req->r_ops[0];
1239
1240	req->r_mtime = aio_req->mtime;
1241	req->r_data_offset = req->r_ops[0].extent.offset;
1242
1243	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1244	if (ret) {
1245		ceph_osdc_put_request(req);
1246		req = orig_req;
1247		goto out;
1248	}
1249
1250	ceph_osdc_put_request(orig_req);
1251
1252	req->r_callback = ceph_aio_complete_req;
1253	req->r_inode = inode;
1254	req->r_priv = aio_req;
1255
1256	ceph_osdc_start_request(req->r_osdc, req);
1257out:
1258	if (ret < 0) {
1259		req->r_result = ret;
1260		ceph_aio_complete_req(req);
1261	}
1262
1263	ceph_put_snap_context(snapc);
1264	kfree(aio_work);
1265}
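    /*
     * Editor's note: the retry above handles -EOLDSNAPC, which an OSD
     * returns when a write carried a snap context older than the one it
     * has for the object. The original request is not simply resent: a
     * fresh request is allocated, the newest snap context is attached,
     * and the op vector is copied over. This appears to be done from a
     * workqueue so that the OSD completion path, which is what detects
     * -EOLDSNAPC, never has to block on the reallocation.
     */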
1266
1267static ssize_t
1268ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1269		       struct ceph_snap_context *snapc,
1270		       struct ceph_cap_flush **pcf)
1271{
1272	struct file *file = iocb->ki_filp;
1273	struct inode *inode = file_inode(file);
1274	struct ceph_inode_info *ci = ceph_inode(inode);
1275	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1276	struct ceph_client_metric *metric = &fsc->mdsc->metric;
1277	struct ceph_vino vino;
1278	struct ceph_osd_request *req;
1279	struct bio_vec *bvecs;
1280	struct ceph_aio_request *aio_req = NULL;
1281	int num_pages = 0;
1282	int flags;
1283	int ret = 0;
1284	struct timespec64 mtime = current_time(inode);
1285	size_t count = iov_iter_count(iter);
1286	loff_t pos = iocb->ki_pos;
1287	bool write = iov_iter_rw(iter) == WRITE;
1288	bool should_dirty = !write && user_backed_iter(iter);
1289
1290	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1291		return -EROFS;
1292
1293	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1294	     (write ? "write" : "read"), file, pos, (unsigned)count,
1295	     snapc, snapc ? snapc->seq : 0);
1296
1297	if (write) {
1298		int ret2;
1299
1300		ceph_fscache_invalidate(inode, true);
1301
1302		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1303					pos >> PAGE_SHIFT,
1304					(pos + count - 1) >> PAGE_SHIFT);
1305		if (ret2 < 0)
1306			dout("invalidate_inode_pages2_range returned %d\n", ret2);
1307
1308		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1309	} else {
1310		flags = CEPH_OSD_FLAG_READ;
1311	}
1312
1313	while (iov_iter_count(iter) > 0) {
1314		u64 size = iov_iter_count(iter);
1315		ssize_t len;
1316
1317		if (write)
1318			size = min_t(u64, size, fsc->mount_options->wsize);
1319		else
1320			size = min_t(u64, size, fsc->mount_options->rsize);
1321
1322		vino = ceph_vino(inode);
1323		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1324					    vino, pos, &size, 0,
1325					    1,
1326					    write ? CEPH_OSD_OP_WRITE :
1327						    CEPH_OSD_OP_READ,
1328					    flags, snapc,
1329					    ci->i_truncate_seq,
1330					    ci->i_truncate_size,
1331					    false);
1332		if (IS_ERR(req)) {
1333			ret = PTR_ERR(req);
1334			break;
1335		}
1336
1337		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1338		if (len < 0) {
1339			ceph_osdc_put_request(req);
1340			ret = len;
1341			break;
1342		}
1343		if (len != size)
1344			osd_req_op_extent_update(req, 0, len);
1345
1346		/*
1347		 * To simplify error handling, allow AIO only when the IO is
1348		 * within i_size or can be satisfied by a single OSD request.
1349		 */
1350		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1351		    (len == count || pos + count <= i_size_read(inode))) {
1352			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1353			if (aio_req) {
1354				aio_req->iocb = iocb;
1355				aio_req->write = write;
1356				aio_req->should_dirty = should_dirty;
1357				INIT_LIST_HEAD(&aio_req->osd_reqs);
1358				if (write) {
1359					aio_req->mtime = mtime;
1360					swap(aio_req->prealloc_cf, *pcf);
1361				}
1362			}
1363			/* ignore error */
1364		}
1365
1366		if (write) {
1367			/*
1368			 * throw out any page cache pages in this range. this
1369			 * may block.
1370			 */
1371			truncate_inode_pages_range(inode->i_mapping, pos,
1372						   PAGE_ALIGN(pos + len) - 1);
1373
1374			req->r_mtime = mtime;
1375		}
1376
1377		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1378
1379		if (aio_req) {
1380			aio_req->total_len += len;
1381			aio_req->num_reqs++;
1382			atomic_inc(&aio_req->pending_reqs);
1383
1384			req->r_callback = ceph_aio_complete_req;
1385			req->r_inode = inode;
1386			req->r_priv = aio_req;
1387			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1388
1389			pos += len;
1390			continue;
1391		}
1392
1393		ceph_osdc_start_request(req->r_osdc, req);
1394		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1395
1396		if (write)
1397			ceph_update_write_metrics(metric, req->r_start_latency,
1398						  req->r_end_latency, len, ret);
1399		else
1400			ceph_update_read_metrics(metric, req->r_start_latency,
1401						 req->r_end_latency, len, ret);
1402
1403		size = i_size_read(inode);
1404		if (!write) {
1405			if (ret == -ENOENT)
1406				ret = 0;
1407			if (ret >= 0 && ret < len && pos + ret < size) {
1408				struct iov_iter i;
1409				int zlen = min_t(size_t, len - ret,
1410						 size - pos - ret);
1411
1412				iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1413				iov_iter_advance(&i, ret);
1414				iov_iter_zero(zlen, &i);
1415				ret += zlen;
1416			}
1417			if (ret >= 0)
1418				len = ret;
1419		}
1420
1421		put_bvecs(bvecs, num_pages, should_dirty);
1422		ceph_osdc_put_request(req);
1423		if (ret < 0)
1424			break;
1425
1426		pos += len;
1427		if (!write && pos >= size)
1428			break;
1429
1430		if (write && pos > size) {
1431			if (ceph_inode_set_size(inode, pos))
1432				ceph_check_caps(ceph_inode(inode),
1433						CHECK_CAPS_AUTHONLY);
1434		}
1435	}
1436
1437	if (aio_req) {
1438		LIST_HEAD(osd_reqs);
1439
1440		if (aio_req->num_reqs == 0) {
1441			kfree(aio_req);
1442			return ret;
1443		}
1444
1445		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1446					      CEPH_CAP_FILE_RD);
1447
1448		list_splice(&aio_req->osd_reqs, &osd_reqs);
1449		inode_dio_begin(inode);
1450		while (!list_empty(&osd_reqs)) {
1451			req = list_first_entry(&osd_reqs,
1452					       struct ceph_osd_request,
1453					       r_private_item);
1454			list_del_init(&req->r_private_item);
1455			if (ret >= 0)
1456				ceph_osdc_start_request(req->r_osdc, req);
1457			if (ret < 0) {
1458				req->r_result = ret;
1459				ceph_aio_complete_req(req);
1460			}
1461		}
1462		return -EIOCBQUEUED;
1463	}
1464
1465	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1466		ret = pos - iocb->ki_pos;
1467		iocb->ki_pos = pos;
1468	}
1469	return ret;
1470}
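    /*
     * Editor's note: an illustrative split, assuming a default layout
     * with 4 MiB objects. An 8 MiB O_DIRECT aio read at offset 0 of a
     * 16 MiB file becomes two OSD requests (ceph_osdc_new_request()
     * clips each one at an object boundary): both are queued on
     * aio_req->osd_reqs with num_reqs == 2, the caller gets
     * -EIOCBQUEUED, and ki_complete() fires only once
     * ceph_aio_complete() sees pending_reqs reach zero.
     */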
1471
1472/*
1473 * Synchronous write, straight from __user pointer or user pages.
1474 *
1475 * If the write spans an object boundary, just do multiple writes.  (For
1476 * a correct atomic write, we should e.g. take write locks on all
1477 * objects, roll back on failure, etc.)
1478 */
1479static ssize_t
1480ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1481		struct ceph_snap_context *snapc)
1482{
1483	struct file *file = iocb->ki_filp;
1484	struct inode *inode = file_inode(file);
1485	struct ceph_inode_info *ci = ceph_inode(inode);
1486	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1487	struct ceph_vino vino;
1488	struct ceph_osd_request *req;
1489	struct page **pages;
1490	u64 len;
1491	int num_pages;
1492	int written = 0;
1493	int flags;
1494	int ret;
1495	bool check_caps = false;
1496	struct timespec64 mtime = current_time(inode);
1497	size_t count = iov_iter_count(from);
1498
1499	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1500		return -EROFS;
1501
1502	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1503	     file, pos, (unsigned)count, snapc, snapc->seq);
1504
1505	ret = filemap_write_and_wait_range(inode->i_mapping,
1506					   pos, pos + count - 1);
1507	if (ret < 0)
1508		return ret;
1509
1510	ceph_fscache_invalidate(inode, false);
1511	ret = invalidate_inode_pages2_range(inode->i_mapping,
1512					    pos >> PAGE_SHIFT,
1513					    (pos + count - 1) >> PAGE_SHIFT);
1514	if (ret < 0)
1515		dout("invalidate_inode_pages2_range returned %d\n", ret);
1516
1517	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1518
1519	while ((len = iov_iter_count(from)) > 0) {
1520		size_t left;
1521		int n;
1522
1523		vino = ceph_vino(inode);
1524		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1525					    vino, pos, &len, 0, 1,
1526					    CEPH_OSD_OP_WRITE, flags, snapc,
1527					    ci->i_truncate_seq,
1528					    ci->i_truncate_size,
1529					    false);
1530		if (IS_ERR(req)) {
1531			ret = PTR_ERR(req);
1532			break;
1533		}
1534
1535		/*
1536		 * write from beginning of first page,
1537		 * regardless of io alignment
1538		 */
1539		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1540
1541		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1542		if (IS_ERR(pages)) {
1543			ret = PTR_ERR(pages);
1544			goto out;
1545		}
1546
1547		left = len;
1548		for (n = 0; n < num_pages; n++) {
1549			size_t plen = min_t(size_t, left, PAGE_SIZE);
1550			ret = copy_page_from_iter(pages[n], 0, plen, from);
1551			if (ret != plen) {
1552				ret = -EFAULT;
1553				break;
1554			}
1555			left -= ret;
1556		}
1557
1558		if (ret < 0) {
1559			ceph_release_page_vector(pages, num_pages);
1560			goto out;
1561		}
1562
1563		req->r_inode = inode;
1564
1565		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1566						false, true);
1567
1568		req->r_mtime = mtime;
1569		ceph_osdc_start_request(&fsc->client->osdc, req);
1570		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1571
1572		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1573					  req->r_end_latency, len, ret);
1574out:
1575		ceph_osdc_put_request(req);
1576		if (ret != 0) {
1577			ceph_set_error_write(ci);
1578			break;
1579		}
1580
1581		ceph_clear_error_write(ci);
1582		pos += len;
1583		written += len;
1584		if (pos > i_size_read(inode)) {
1585			check_caps = ceph_inode_set_size(inode, pos);
1586			if (check_caps)
1587				ceph_check_caps(ceph_inode(inode),
1588						CHECK_CAPS_AUTHONLY);
1589		}
1590
1591	}
1592
1593	if (ret != -EOLDSNAPC && written > 0) {
1594		ret = written;
1595		iocb->ki_pos = pos;
1596	}
1597	return ret;
1598}
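    /*
     * Editor's note: unlike the O_DIRECT path, ceph_sync_write() stages
     * data through a kernel page vector: each object-sized chunk is
     * copied out of the iter with copy_page_from_iter() and written from
     * page offset 0 regardless of the user buffer's alignment. On any
     * OSD error, CEPH_I_ERROR_WRITE is set so that subsequent writers
     * fall back to this sync path until a write succeeds and
     * ceph_clear_error_write() runs.
     */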
1599
1600/*
1601 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1602 * Atomically grab references, so that those bits are not released
1603 * back to the MDS mid-read.
1604 *
1605 * Hmm, the sync read case isn't actually async... should it be?
1606 */
1607static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1608{
1609	struct file *filp = iocb->ki_filp;
1610	struct ceph_file_info *fi = filp->private_data;
1611	size_t len = iov_iter_count(to);
1612	struct inode *inode = file_inode(filp);
1613	struct ceph_inode_info *ci = ceph_inode(inode);
1614	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1615	ssize_t ret;
1616	int want = 0, got = 0;
1617	int retry_op = 0, read = 0;
1618
1619again:
1620	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1621	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1622
1623	if (ceph_inode_is_shutdown(inode))
1624		return -ESTALE;
1625
1626	if (direct_lock)
1627		ceph_start_io_direct(inode);
1628	else
1629		ceph_start_io_read(inode);
1630
1631	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1632		want |= CEPH_CAP_FILE_CACHE;
1633	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1634		want |= CEPH_CAP_FILE_LAZYIO;
1635
1636	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1637	if (ret < 0) {
1638		if (direct_lock)
1639			ceph_end_io_direct(inode);
1640		else
1641			ceph_end_io_read(inode);
1642		return ret;
1643	}
1644
1645	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1646	    (iocb->ki_flags & IOCB_DIRECT) ||
1647	    (fi->flags & CEPH_F_SYNC)) {
1648
1649		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1650		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1651		     ceph_cap_string(got));
1652
1653		if (!ceph_has_inline_data(ci)) {
1654			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1655				ret = ceph_direct_read_write(iocb, to,
1656							     NULL, NULL);
1657				if (ret >= 0 && ret < len)
1658					retry_op = CHECK_EOF;
1659			} else {
1660				ret = ceph_sync_read(iocb, to, &retry_op);
1661			}
1662		} else {
1663			retry_op = READ_INLINE;
1664		}
1665	} else {
1666		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1667		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1668		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1669		     ceph_cap_string(got));
1670		ceph_add_rw_context(fi, &rw_ctx);
1671		ret = generic_file_read_iter(iocb, to);
1672		ceph_del_rw_context(fi, &rw_ctx);
1673	}
1674
1675	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1676	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
 
 
 
 
1677	ceph_put_cap_refs(ci, got);
1678
1679	if (direct_lock)
1680		ceph_end_io_direct(inode);
1681	else
1682		ceph_end_io_read(inode);
1683
1684	if (retry_op > HAVE_RETRIED && ret >= 0) {
1685		int statret;
1686		struct page *page = NULL;
1687		loff_t i_size;
1688		if (retry_op == READ_INLINE) {
1689			page = __page_cache_alloc(GFP_KERNEL);
1690			if (!page)
1691				return -ENOMEM;
1692		}
1693
1694		statret = __ceph_do_getattr(inode, page,
1695					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1696		if (statret < 0) {
1697			if (page)
1698				__free_page(page);
1699			if (statret == -ENODATA) {
1700				BUG_ON(retry_op != READ_INLINE);
1701				goto again;
1702			}
1703			return statret;
1704		}
1705
1706		i_size = i_size_read(inode);
1707		if (retry_op == READ_INLINE) {
1708			BUG_ON(ret > 0 || read > 0);
1709			if (iocb->ki_pos < i_size &&
1710			    iocb->ki_pos < PAGE_SIZE) {
1711				loff_t end = min_t(loff_t, i_size,
1712						   iocb->ki_pos + len);
1713				end = min_t(loff_t, end, PAGE_SIZE);
1714				if (statret < end)
1715					zero_user_segment(page, statret, end);
1716				ret = copy_page_to_iter(page,
1717						iocb->ki_pos & ~PAGE_MASK,
1718						end - iocb->ki_pos, to);
1719				iocb->ki_pos += ret;
1720				read += ret;
1721			}
1722			if (iocb->ki_pos < i_size && read < len) {
1723				size_t zlen = min_t(size_t, len - read,
1724						    i_size - iocb->ki_pos);
1725				ret = iov_iter_zero(zlen, to);
1726				iocb->ki_pos += ret;
1727				read += ret;
1728			}
1729			__free_pages(page, 0);
1730			return read;
1731		}
1732
1733		/* hit EOF or hole? */
1734		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1735		    ret < len) {
1736			dout("sync_read hit hole, ppos %lld < size %lld"
1737			     ", reading more\n", iocb->ki_pos, i_size);
1738
1739			read += ret;
1740			len -= ret;
1741			retry_op = HAVE_RETRIED;
1742			goto again;
1743		}
1744	}
1745
1746	if (ret >= 0)
1747		ret += read;
1748
1749	return ret;
1750}
1751
1752/*
1753 * Take cap references to avoid releasing caps to MDS mid-write.
1754 *
1755 * If we are synchronous, and write with an old snap context, the OSD
1756 * may return EOLDSNAPC.  In that case, retry the write _after_
1757 * dropping our cap refs and allowing the pending snap to logically
1758 * complete _before_ this write occurs.
1759 *
1760 * If we are near ENOSPC, write synchronously.
1761 */
1762static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1763{
1764	struct file *file = iocb->ki_filp;
1765	struct ceph_file_info *fi = file->private_data;
1766	struct inode *inode = file_inode(file);
1767	struct ceph_inode_info *ci = ceph_inode(inode);
1768	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1769	struct ceph_osd_client *osdc = &fsc->client->osdc;
1770	struct ceph_cap_flush *prealloc_cf;
1771	ssize_t count, written = 0;
1772	int err, want = 0, got;
1773	bool direct_lock = false;
1774	u32 map_flags;
1775	u64 pool_flags;
1776	loff_t pos;
1777	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1778
1779	if (ceph_inode_is_shutdown(inode))
1780		return -ESTALE;
1781
1782	if (ceph_snap(inode) != CEPH_NOSNAP)
1783		return -EROFS;
1784
1785	prealloc_cf = ceph_alloc_cap_flush();
1786	if (!prealloc_cf)
1787		return -ENOMEM;
1788
1789	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1790		direct_lock = true;
1791
1792retry_snap:
1793	if (direct_lock)
1794		ceph_start_io_direct(inode);
1795	else
1796		ceph_start_io_write(inode);
1797
1798	/* We can write back this queue in page reclaim */
1799	current->backing_dev_info = inode_to_bdi(inode);
1800
1801	if (iocb->ki_flags & IOCB_APPEND) {
1802		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1803		if (err < 0)
1804			goto out;
1805	}
1806
1807	err = generic_write_checks(iocb, from);
1808	if (err <= 0)
1809		goto out;
1810
1811	pos = iocb->ki_pos;
1812	if (unlikely(pos >= limit)) {
1813		err = -EFBIG;
1814		goto out;
1815	} else {
1816		iov_iter_truncate(from, limit - pos);
1817	}
1818
1819	count = iov_iter_count(from);
1820	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1821		err = -EDQUOT;
1822		goto out;
1823	}
1824
1825	down_read(&osdc->lock);
1826	map_flags = osdc->osdmap->flags;
1827	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1828	up_read(&osdc->lock);
1829	if ((map_flags & CEPH_OSDMAP_FULL) ||
1830	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
1831		err = -ENOSPC;
1832		goto out;
1833	}
1834
1835	err = file_remove_privs(file);
1836	if (err)
1837		goto out;
1838
1839	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1840	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1841	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1842		want |= CEPH_CAP_FILE_BUFFER;
1843	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1844		want |= CEPH_CAP_FILE_LAZYIO;
1845	got = 0;
1846	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1847	if (err < 0)
1848		goto out;
1849
1850	err = file_update_time(file);
1851	if (err)
1852		goto out_caps;
1853
1854	inode_inc_iversion_raw(inode);
1855
1856	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1857	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1858
1859	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1860	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1861	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1862		struct ceph_snap_context *snapc;
1863		struct iov_iter data;
1864
1865		spin_lock(&ci->i_ceph_lock);
1866		if (__ceph_have_pending_cap_snap(ci)) {
1867			struct ceph_cap_snap *capsnap =
1868					list_last_entry(&ci->i_cap_snaps,
1869							struct ceph_cap_snap,
1870							ci_item);
1871			snapc = ceph_get_snap_context(capsnap->context);
1872		} else {
1873			BUG_ON(!ci->i_head_snapc);
1874			snapc = ceph_get_snap_context(ci->i_head_snapc);
1875		}
1876		spin_unlock(&ci->i_ceph_lock);
1877
1878		/* we might need to revert to that point */
1879		data = *from;
1880		if (iocb->ki_flags & IOCB_DIRECT)
1881			written = ceph_direct_read_write(iocb, &data, snapc,
1882							 &prealloc_cf);
1883		else
1884			written = ceph_sync_write(iocb, &data, pos, snapc);
1885		if (direct_lock)
1886			ceph_end_io_direct(inode);
1887		else
1888			ceph_end_io_write(inode);
1889		if (written > 0)
1890			iov_iter_advance(from, written);
1891		ceph_put_snap_context(snapc);
1892	} else {
1893		/*
1894		 * No need to acquire the i_truncate_mutex, because the
1895		 * MDS revokes Fwb caps before sending a truncate message
1896		 * to us. We can't get the Fwb cap while there is a
1897		 * pending vmtruncate, so write and vmtruncate cannot run
1898		 * at the same time.
1899		 */
1900		written = generic_perform_write(iocb, from);
1901		if (likely(written >= 0))
1902			iocb->ki_pos = pos + written;
1903		ceph_end_io_write(inode);
1904	}
1905
1906	if (written >= 0) {
1907		int dirty;
1908
1909		spin_lock(&ci->i_ceph_lock);
1910		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1911					       &prealloc_cf);
1912		spin_unlock(&ci->i_ceph_lock);
1913		if (dirty)
1914			__mark_inode_dirty(inode, dirty);
1915		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1916			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
1917	}
1918
1919	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1920	     inode, ceph_vinop(inode), pos, (unsigned)count,
1921	     ceph_cap_string(got));
1922	ceph_put_cap_refs(ci, got);
1923
1924	if (written == -EOLDSNAPC) {
1925		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1926		     inode, ceph_vinop(inode), pos, (unsigned)count);
1927		goto retry_snap;
1928	}
1929
1930	if (written >= 0) {
1931		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1932		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1933			iocb->ki_flags |= IOCB_DSYNC;
1934		written = generic_write_sync(iocb, written);
1935	}
1936
1937	goto out_unlocked;
1938out_caps:
1939	ceph_put_cap_refs(ci, got);
1940out:
1941	if (direct_lock)
1942		ceph_end_io_direct(inode);
1943	else
1944		ceph_end_io_write(inode);
1945out_unlocked:
1946	ceph_free_cap_flush(prealloc_cf);
1947	current->backing_dev_info = NULL;
1948	return written ? written : err;
1949}
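
/*
 * Editor's illustration, not part of the kernel source: a minimal
 * userspace sketch of how the two write paths above are selected.
 * Opening with O_DIRECT steers ceph_write_iter() into the
 * ceph_direct_read_write() branch; a plain open() ends up in
 * generic_perform_write().  The helper name is illustrative, the
 * 4096-byte alignment is an assumption, and error handling is
 * abbreviated.
 */
#if 0	/* example only, never compiled with the kernel */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int example_direct_write(const char *path)
{
	void *buf;
	int fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return -1;
	/* O_DIRECT buffers must be suitably aligned; 4096 is typical */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return -1;
	}
	memset(buf, 'x', 4096);
	/* this pwrite() reaches the IOCB_DIRECT branch above */
	if (pwrite(fd, buf, 4096, 0) != 4096) {
		free(buf);
		close(fd);
		return -1;
	}
	free(buf);
	close(fd);
	return 0;
}
#endif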
1950
1951/*
1952 * llseek.  Be sure to verify the file size on SEEK_END.
1953 */
1954static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1955{
1956	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1957		struct inode *inode = file_inode(file);
1958		int ret;
1959
1960		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1961		if (ret < 0)
1962			return ret;
1963	}
1964	return generic_file_llseek(file, offset, whence);
1965}
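
/*
 * Editor's illustration, not part of the kernel source: the getattr
 * above exists because another client may have changed the file size,
 * so SEEK_END/SEEK_DATA/SEEK_HOLE must refresh i_size from the MDS
 * before generic_file_llseek() consults it.  A hedged userspace sketch
 * of the calls that land here (names illustrative):
 */
#if 0	/* example only, never compiled with the kernel */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void example_probe(int fd)
{
	off_t end  = lseek(fd, 0, SEEK_END);	/* forces a size getattr */
	off_t hole = lseek(fd, 0, SEEK_HOLE);	/* likewise */

	printf("size=%lld first-hole=%lld\n",
	       (long long)end, (long long)hole);
}
#endif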
1966
1967static inline void ceph_zero_partial_page(
1968	struct inode *inode, loff_t offset, unsigned size)
1969{
1970	struct page *page;
1971	pgoff_t index = offset >> PAGE_SHIFT;
1972
1973	page = find_lock_page(inode->i_mapping, index);
1974	if (page) {
1975		wait_on_page_writeback(page);
1976		zero_user(page, offset & (PAGE_SIZE - 1), size);
1977		unlock_page(page);
1978		put_page(page);
1979	}
1980}
1981
1982static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1983				      loff_t length)
1984{
1985	loff_t nearly = round_up(offset, PAGE_SIZE);
1986	if (offset < nearly) {
1987		loff_t size = nearly - offset;
1988		if (length < size)
1989			size = length;
1990		ceph_zero_partial_page(inode, offset, size);
1991		offset += size;
1992		length -= size;
1993	}
1994	if (length >= PAGE_SIZE) {
1995		loff_t size = round_down(length, PAGE_SIZE);
1996		truncate_pagecache_range(inode, offset, offset + size - 1);
1997		offset += size;
1998		length -= size;
1999	}
2000	if (length)
2001		ceph_zero_partial_page(inode, offset, length);
2002}
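
/*
 * Editor's note, a worked example of the head/middle/tail split above,
 * assuming PAGE_SIZE == 4096.  For offset = 1000, length = 10000:
 *   head:   nearly = 4096, so zero the 3096-byte tail of the first
 *           page; offset becomes 4096, length 6904
 *   middle: round_down(6904, 4096) = 4096, so the one whole page
 *           [4096, 8191] is dropped via truncate_pagecache_range();
 *           offset becomes 8192, length 2808
 *   tail:   zero the first 2808 bytes of the final partial page
 */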
2003
2004static int ceph_zero_partial_object(struct inode *inode,
2005				    loff_t offset, loff_t *length)
2006{
2007	struct ceph_inode_info *ci = ceph_inode(inode);
2008	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2009	struct ceph_osd_request *req;
2010	int ret = 0;
2011	loff_t zero = 0;
2012	int op;
2013
2014	if (ceph_inode_is_shutdown(inode))
2015		return -EIO;
2016
2017	if (!length) {
2018		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2019		length = &zero;
2020	} else {
2021		op = CEPH_OSD_OP_ZERO;
2022	}
2023
2024	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2025					ceph_vino(inode),
2026					offset, length,
2027					0, 1, op,
2028					CEPH_OSD_FLAG_WRITE,
2029					NULL, 0, 0, false);
2030	if (IS_ERR(req)) {
2031		ret = PTR_ERR(req);
2032		goto out;
2033	}
2034
2035	req->r_mtime = inode->i_mtime;
2036	ceph_osdc_start_request(&fsc->client->osdc, req);
2037	ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2038	if (ret == -ENOENT)
2039		ret = 0;
2040	ceph_osdc_put_request(req);
2041
2042out:
2043	return ret;
2044}
2045
2046static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2047{
2048	int ret = 0;
2049	struct ceph_inode_info *ci = ceph_inode(inode);
2050	s32 stripe_unit = ci->i_layout.stripe_unit;
2051	s32 stripe_count = ci->i_layout.stripe_count;
2052	s32 object_size = ci->i_layout.object_size;
2053	u64 object_set_size = (u64) object_size * stripe_count;
2054	u64 nearly, t;
2055
2056	/* round offset up to next period boundary */
2057	nearly = offset + object_set_size - 1;
2058	t = nearly;
2059	nearly -= do_div(t, object_set_size);
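	/*
	 * Editor's note, a worked example under assumed layout values:
	 * with object_size = 4 MiB and stripe_count = 2, the period
	 * (object_set_size) is 8 MiB.  For offset = 5 MiB, t becomes
	 * 5M + 8M - 1 and do_div() returns the remainder 5M - 1, so
	 * nearly = 8 MiB; an offset already on a period boundary maps
	 * to itself.
	 */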
2060
2061	while (length && offset < nearly) {
2062		loff_t size = length;
2063		ret = ceph_zero_partial_object(inode, offset, &size);
2064		if (ret < 0)
2065			return ret;
2066		offset += size;
2067		length -= size;
2068	}
2069	while (length >= object_set_size) {
2070		int i;
2071		loff_t pos = offset;
2072		for (i = 0; i < stripe_count; ++i) {
2073			ret = ceph_zero_partial_object(inode, pos, NULL);
2074			if (ret < 0)
2075				return ret;
2076			pos += stripe_unit;
2077		}
2078		offset += object_set_size;
2079		length -= object_set_size;
2080	}
2081	while (length) {
2082		loff_t size = length;
2083		ret = ceph_zero_partial_object(inode, offset, &size);
2084		if (ret < 0)
2085			return ret;
2086		offset += size;
2087		length -= size;
2088	}
2089	return ret;
2090}
2091
2092static long ceph_fallocate(struct file *file, int mode,
2093				loff_t offset, loff_t length)
2094{
2095	struct ceph_file_info *fi = file->private_data;
2096	struct inode *inode = file_inode(file);
2097	struct ceph_inode_info *ci = ceph_inode(inode);
2098	struct ceph_cap_flush *prealloc_cf;
2099	int want, got = 0;
2100	int dirty;
2101	int ret = 0;
2102	loff_t endoff = 0;
2103	loff_t size;
2104
2105	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2106		return -EOPNOTSUPP;
2107
2108	if (!S_ISREG(inode->i_mode))
2109		return -EOPNOTSUPP;
2110
2111	prealloc_cf = ceph_alloc_cap_flush();
2112	if (!prealloc_cf)
2113		return -ENOMEM;
2114
2115	inode_lock(inode);
2116
2117	if (ceph_snap(inode) != CEPH_NOSNAP) {
2118		ret = -EROFS;
2119		goto unlock;
2120	}
2121
2122	size = i_size_read(inode);
2123
2124	/* Are we punching a hole beyond EOF? */
2125	if (offset >= size)
2126		goto unlock;
2127	if ((offset + length) > size)
2128		length = size - offset;
2129
2130	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2131		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2132	else
2133		want = CEPH_CAP_FILE_BUFFER;
2134
2135	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2136	if (ret < 0)
2137		goto unlock;
2138
2139	filemap_invalidate_lock(inode->i_mapping);
2140	ceph_fscache_invalidate(inode, false);
2141	ceph_zero_pagecache_range(inode, offset, length);
2142	ret = ceph_zero_objects(inode, offset, length);
2143
2144	if (!ret) {
2145		spin_lock(&ci->i_ceph_lock);
2146		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2147					       &prealloc_cf);
2148		spin_unlock(&ci->i_ceph_lock);
2149		if (dirty)
2150			__mark_inode_dirty(inode, dirty);
2151	}
2152	filemap_invalidate_unlock(inode->i_mapping);
2153
2154	ceph_put_cap_refs(ci, got);
2155unlock:
2156	inode_unlock(inode);
2157	ceph_free_cap_flush(prealloc_cf);
2158	return ret;
2159}
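
/*
 * Editor's illustration, not part of the kernel source: the only mode
 * ceph_fallocate() accepts is hole punching with the size preserved,
 * so both flags must be passed together.  A hedged userspace sketch
 * (helper name illustrative):
 */
#if 0	/* example only, never compiled with the kernel */
#define _GNU_SOURCE
#include <fcntl.h>

static int example_punch_hole(int fd, off_t off, off_t len)
{
	/* any other mode combination gets -EOPNOTSUPP from ceph */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 off, len);
}
#endif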
2160
2161/*
2162 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2163 * src_ci.  Two attempts are made to obtain both caps, and an error is returned if
2164 * this fails; zero is returned on success.
2165 */
2166static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2167			  struct file *dst_filp,
2168			  loff_t dst_endoff, int *dst_got)
2169{
2170	int ret = 0;
2171	bool retrying = false;
2172
2173retry_caps:
2174	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2175			    dst_endoff, dst_got);
2176	if (ret < 0)
2177		return ret;
2178
2179	/*
2180	 * Since we're already holding the FILE_WR capability for the dst file,
2181	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2182	 * retry dance instead to try to get both capabilities.
2183	 */
2184	ret = ceph_try_get_caps(file_inode(src_filp),
2185				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2186				false, src_got);
2187	if (ret <= 0) {
2188		/* Start by dropping dst_ci caps and getting src_ci caps */
2189		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2190		if (retrying) {
2191			if (!ret)
2192				/* ceph_try_get_caps masks EAGAIN */
2193				ret = -EAGAIN;
2194			return ret;
2195		}
2196		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2197				    CEPH_CAP_FILE_SHARED, -1, src_got);
2198		if (ret < 0)
2199			return ret;
2200		/* ... drop src_ci caps too, and retry */
2201		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2202		retrying = true;
2203		goto retry_caps;
2204	}
2205	return ret;
2206}
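
/*
 * Editor's note on the retry dance above: it avoids an ABBA deadlock
 * between two callers that each hold one of the two caps and block
 * waiting for the other.  On failure the sequence is: drop Fw(dst),
 * block until Fr(src) can be granted, drop Fr(src) again, then retry
 * from the top (blocking Fw, non-blocking Fr).  A second failure
 * returns -EAGAIN rather than looping forever.
 */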
2207
2208static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2209			   struct ceph_inode_info *dst_ci, int dst_got)
2210{
2211	ceph_put_cap_refs(src_ci, src_got);
2212	ceph_put_cap_refs(dst_ci, dst_got);
2213}
2214
2215/*
2216 * This function does several size-related checks, returning an error if:
2217 *  - source file is smaller than off+len
2218 *  - destination file size is not OK (inode_newsize_ok())
2219 *  - the max-bytes quota is exceeded
2220 */
2221static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2222			   loff_t src_off, loff_t dst_off, size_t len)
2223{
2224	loff_t size, endoff;
2225
2226	size = i_size_read(src_inode);
2227	/*
2228	 * Don't copy beyond source file EOF.  Instead of simply setting length
2229	 * to (size - src_off), just drop to VFS default implementation, as the
2230	 * local i_size may be stale due to other clients writing to the source
2231	 * inode.
2232	 */
2233	if (src_off + len > size) {
2234		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2235		     src_off, len, size);
2236		return -EOPNOTSUPP;
2237	}
2238	size = i_size_read(dst_inode);
2239
2240	endoff = dst_off + len;
2241	if (inode_newsize_ok(dst_inode, endoff))
2242		return -EOPNOTSUPP;
2243
2244	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2245		return -EDQUOT;
2246
2247	return 0;
2248}
2249
2250static struct ceph_osd_request *
2251ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2252			    u64 src_snapid,
2253			    struct ceph_object_id *src_oid,
2254			    struct ceph_object_locator *src_oloc,
2255			    struct ceph_object_id *dst_oid,
2256			    struct ceph_object_locator *dst_oloc,
2257			    u32 truncate_seq, u64 truncate_size)
2258{
2259	struct ceph_osd_request *req;
2260	int ret;
2261	u32 src_fadvise_flags =
2262		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2263		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2264	u32 dst_fadvise_flags =
2265		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2266		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2267
2268	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2269	if (!req)
2270		return ERR_PTR(-ENOMEM);
2271
2272	req->r_flags = CEPH_OSD_FLAG_WRITE;
2273
2274	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2275	ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2276
2277	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2278					src_oid, src_oloc,
2279					src_fadvise_flags,
2280					dst_fadvise_flags,
2281					truncate_seq,
2282					truncate_size,
2283					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2284	if (ret)
2285		goto out;
2286
2287	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2288	if (ret)
2289		goto out;
2290
2291	return req;
2292
2293out:
2294	ceph_osdc_put_request(req);
2295	return ERR_PTR(ret);
2296}
2297
2298static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2299				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2300				    struct ceph_fs_client *fsc,
2301				    size_t len, unsigned int flags)
2302{
2303	struct ceph_object_locator src_oloc, dst_oloc;
2304	struct ceph_object_id src_oid, dst_oid;
2305	struct ceph_osd_client *osdc;
2306	struct ceph_osd_request *req;
2307	size_t bytes = 0;
2308	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2309	u32 src_objlen, dst_objlen;
2310	u32 object_size = src_ci->i_layout.object_size;
2311	int ret;
2312
2313	src_oloc.pool = src_ci->i_layout.pool_id;
2314	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2315	dst_oloc.pool = dst_ci->i_layout.pool_id;
2316	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2317	osdc = &fsc->client->osdc;
2318
2319	while (len >= object_size) {
2320		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2321					      object_size, &src_objnum,
2322					      &src_objoff, &src_objlen);
2323		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2324					      object_size, &dst_objnum,
2325					      &dst_objoff, &dst_objlen);
2326		ceph_oid_init(&src_oid);
2327		ceph_oid_printf(&src_oid, "%llx.%08llx",
2328				src_ci->i_vino.ino, src_objnum);
2329		ceph_oid_init(&dst_oid);
2330		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2331				dst_ci->i_vino.ino, dst_objnum);
2332		/* Do an object remote copy */
2333		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2334						  &src_oid, &src_oloc,
2335						  &dst_oid, &dst_oloc,
2336						  dst_ci->i_truncate_seq,
2337						  dst_ci->i_truncate_size);
2338		if (IS_ERR(req))
2339			ret = PTR_ERR(req);
2340		else {
2341			ceph_osdc_start_request(osdc, req);
2342			ret = ceph_osdc_wait_request(osdc, req);
2343			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2344						     req->r_start_latency,
2345						     req->r_end_latency,
2346						     object_size, ret);
2347			ceph_osdc_put_request(req);
2348		}
2349		if (ret) {
2350			if (ret == -EOPNOTSUPP) {
2351				fsc->have_copy_from2 = false;
2352				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2353			}
2354			dout("ceph_osdc_copy_from returned %d\n", ret);
2355			if (!bytes)
2356				bytes = ret;
2357			goto out;
2358		}
2359		len -= object_size;
2360		bytes += object_size;
2361		*src_off += object_size;
2362		*dst_off += object_size;
2363	}
2364
2365out:
2366	ceph_oloc_destroy(&src_oloc);
2367	ceph_oloc_destroy(&dst_oloc);
2368	return bytes;
2369}
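
/*
 * Editor's note: with the non-striped layouts the caller enforces
 * (stripe_count == 1), ceph_calc_file_object_mapping() reduces to
 * objnum = off / object_size and objoff = off % object_size.  As an
 * assumed example, for ino 0x10000000000, object_size = 4 MiB and
 * *src_off = 12 MiB, the source object copied above is named
 * "10000000000.00000003".
 */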
2370
2371static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2372				      struct file *dst_file, loff_t dst_off,
2373				      size_t len, unsigned int flags)
2374{
2375	struct inode *src_inode = file_inode(src_file);
2376	struct inode *dst_inode = file_inode(dst_file);
2377	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2378	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2379	struct ceph_cap_flush *prealloc_cf;
2380	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2381	loff_t size;
2382	ssize_t ret = -EIO, bytes;
2383	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2384	u32 src_objlen, dst_objlen;
2385	int src_got = 0, dst_got = 0, err, dirty;
2386
2387	if (src_inode->i_sb != dst_inode->i_sb) {
2388		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2389
2390		if (ceph_fsid_compare(&src_fsc->client->fsid,
2391				      &dst_fsc->client->fsid)) {
2392			dout("Copying files across clusters: src: %pU dst: %pU\n",
2393			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2394			return -EXDEV;
2395		}
2396	}
2397	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2398		return -EROFS;
2399
2400	/*
2401	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2402	 * fallback to the default VFS copy_file_range implementation.  This is
2403	 * desirable in several cases (for ex, the 'len' is smaller than the
2404	 * size of the objects, or in cases where that would be more
2405	 * efficient).
2406	 */
2407
2408	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2409		return -EOPNOTSUPP;
2410
2411	if (!src_fsc->have_copy_from2)
2412		return -EOPNOTSUPP;
2413
2414	/*
2415	 * Striped file layouts require that we copy partial objects, but the
2416	 * OSD copy-from operation only supports full-object copies.  Limit
2417	 * this to non-striped file layouts for now.
2418	 */
2419	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2420	    (src_ci->i_layout.stripe_count != 1) ||
2421	    (dst_ci->i_layout.stripe_count != 1) ||
2422	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2423		dout("Invalid src/dst files layout\n");
2424		return -EOPNOTSUPP;
2425	}
2426
2427	if (len < src_ci->i_layout.object_size)
2428		return -EOPNOTSUPP; /* no remote copy will be done */
2429
2430	prealloc_cf = ceph_alloc_cap_flush();
2431	if (!prealloc_cf)
2432		return -ENOMEM;
2433
2434	/* Start by sync'ing the source and destination files */
2435	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2436	if (ret < 0) {
2437		dout("failed to write src file (%zd)\n", ret);
2438		goto out;
2439	}
2440	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2441	if (ret < 0) {
2442		dout("failed to write dst file (%zd)\n", ret);
2443		goto out;
2444	}
2445
2446	/*
2447	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2448	 * clients may have dirty data in their caches.  And OSDs know nothing
2449	 * about caps, so they can't safely do the remote object copies.
2450	 */
2451	err = get_rd_wr_caps(src_file, &src_got,
2452			     dst_file, (dst_off + len), &dst_got);
2453	if (err < 0) {
2454		dout("get_rd_wr_caps returned %d\n", err);
2455		ret = -EOPNOTSUPP;
2456		goto out;
2457	}
2458
2459	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2460	if (ret < 0)
2461		goto out_caps;
2462
2463	/* Drop dst file cached pages */
2464	ceph_fscache_invalidate(dst_inode, false);
2465	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2466					    dst_off >> PAGE_SHIFT,
2467					    (dst_off + len) >> PAGE_SHIFT);
2468	if (ret < 0) {
2469		dout("Failed to invalidate inode pages (%zd)\n", ret);
2470		ret = 0; /* XXX */
2471	}
2472	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2473				      src_ci->i_layout.object_size,
2474				      &src_objnum, &src_objoff, &src_objlen);
2475	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2476				      dst_ci->i_layout.object_size,
2477				      &dst_objnum, &dst_objoff, &dst_objlen);
2478	/* object-level offsets need to be the same */
2479	if (src_objoff != dst_objoff) {
2480		ret = -EOPNOTSUPP;
2481		goto out_caps;
2482	}
2483
2484	/*
2485	 * Do a manual copy if the object offset isn't object aligned.
2486	 * 'src_objlen' contains the bytes left until the end of the object,
2487	 * starting at src_off.
2488	 */
2489	if (src_objoff) {
2490		dout("Initial partial copy of %u bytes\n", src_objlen);
2491
2492		/*
2493		 * we need to temporarily drop all caps as we'll be calling
2494		 * {read,write}_iter, which will get caps again.
2495		 */
2496		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2497		ret = do_splice_direct(src_file, &src_off, dst_file,
2498				       &dst_off, src_objlen, flags);
2499		/* Abort on short copies or on error */
2500		if (ret < src_objlen) {
2501			dout("Failed partial copy (%zd)\n", ret);
2502			goto out;
2503		}
2504		len -= ret;
2505		err = get_rd_wr_caps(src_file, &src_got,
2506				     dst_file, (dst_off + len), &dst_got);
2507		if (err < 0)
2508			goto out;
2509		err = is_file_size_ok(src_inode, dst_inode,
2510				      src_off, dst_off, len);
2511		if (err < 0)
2512			goto out_caps;
2513	}
2514
2515	size = i_size_read(dst_inode);
2516	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2517				     src_fsc, len, flags);
2518	if (bytes <= 0) {
2519		if (!ret)
2520			ret = bytes;
2521		goto out_caps;
2522	}
2523	dout("Copied %zu bytes out of %zu\n", bytes, len);
2524	len -= bytes;
2525	ret += bytes;
2526
2527	file_update_time(dst_file);
2528	inode_inc_iversion_raw(dst_inode);
2529
2530	if (dst_off > size) {
2531		/* Let the MDS know about dst file size change */
2532		if (ceph_inode_set_size(dst_inode, dst_off) ||
2533		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2534			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
2535	}
2536	/* Mark Fw dirty */
2537	spin_lock(&dst_ci->i_ceph_lock);
2538	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2539	spin_unlock(&dst_ci->i_ceph_lock);
2540	if (dirty)
2541		__mark_inode_dirty(dst_inode, dirty);
2542
2543out_caps:
2544	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2545
2546	/*
2547	 * Do the final manual copy if we still have some bytes left, unless
2548	 * there were errors in remote object copies (len >= object_size).
2549	 */
2550	if (len && (len < src_ci->i_layout.object_size)) {
2551		dout("Final partial copy of %zu bytes\n", len);
2552		bytes = do_splice_direct(src_file, &src_off, dst_file,
2553					 &dst_off, len, flags);
2554		if (bytes > 0)
2555			ret += bytes;
2556		else
2557			dout("Failed partial copy (%zd)\n", bytes);
2558	}
2559
2560out:
2561	ceph_free_cap_flush(prealloc_cf);
2562
2563	return ret;
2564}
2565
2566static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2567				    struct file *dst_file, loff_t dst_off,
2568				    size_t len, unsigned int flags)
2569{
2570	ssize_t ret;
2571
2572	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2573				     len, flags);
2574
2575	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2576		ret = generic_copy_file_range(src_file, src_off, dst_file,
2577					      dst_off, len, flags);
2578	return ret;
2579}
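
/*
 * Editor's illustration, not part of the kernel source: callers never
 * see the -EOPNOTSUPP/-EXDEV handled above, since the fallback to
 * generic_copy_file_range() is transparent.  A hedged userspace sketch
 * of a loop over short copies (names illustrative, 64-bit off_t
 * assumed):
 */
#if 0	/* example only, never compiled with the kernel */
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include <unistd.h>

static int example_copy(int src_fd, int dst_fd, off_t len)
{
	off_t src_off = 0, dst_off = 0;

	while (len > 0) {
		ssize_t n = copy_file_range(src_fd, &src_off,
					    dst_fd, &dst_off, len, 0);
		if (n <= 0)
			return -1;	/* real code would inspect errno */
		len -= n;
	}
	return 0;
}
#endif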
2580
2581const struct file_operations ceph_file_fops = {
2582	.open = ceph_open,
2583	.release = ceph_release,
2584	.llseek = ceph_llseek,
2585	.read_iter = ceph_read_iter,
2586	.write_iter = ceph_write_iter,
2587	.mmap = ceph_mmap,
2588	.fsync = ceph_fsync,
2589	.lock = ceph_lock,
2590	.setlease = simple_nosetlease,
2591	.flock = ceph_flock,
2592	.splice_read = generic_file_splice_read,
2593	.splice_write = iter_file_splice_write,
2594	.unlocked_ioctl = ceph_ioctl,
2595	.compat_ioctl = compat_ptr_ioctl,
2596	.fallocate	= ceph_fallocate,
2597	.copy_file_range = ceph_copy_file_range,
2598};