fs/ceph/file.c (v4.6)
   1#include <linux/ceph/ceph_debug.h>
   2
   3#include <linux/module.h>
   4#include <linux/sched.h>
   5#include <linux/slab.h>
   6#include <linux/file.h>
   7#include <linux/mount.h>
   8#include <linux/namei.h>
   9#include <linux/writeback.h>
  10#include <linux/falloc.h>
  11
  12#include "super.h"
  13#include "mds_client.h"
  14#include "cache.h"
  15
  16/*
  17 * Ceph file operations
  18 *
  19 * Implement basic open/close functionality, and implement
  20 * read/write.
  21 *
  22 * We implement three modes of file I/O:
  23 *  - buffered uses the generic_file_aio_{read,write} helpers
  24 *
  25 *  - synchronous is used when there is multi-client read/write
  26 *    sharing, avoids the page cache, and synchronously waits for an
  27 *    ack from the OSD.
  28 *
  29 *  - direct io takes the variant of the sync path that references
  30 *    user pages directly.
  31 *
  32 * fsync() flushes and waits on dirty pages, but just queues metadata
  33 * for writeback: since the MDS can recover size and mtime there is no
  34 * need to wait for MDS acknowledgement.
  35 */
  36
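/*
 * Illustrative sketch (not part of this file; paths and fds are made up):
 * roughly how the three modes above are reached from userspace.
 *
 *   int bfd = open("/mnt/cephfs/f", O_RDWR);             // buffered path
 *   int dfd = open("/mnt/cephfs/f", O_RDWR | O_DIRECT);  // direct io path
 *   // the synchronous path is chosen internally when the client lacks
 *   // the caps needed for buffered io, e.g. under multi-client sharing
 */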
  37/*
  38 * Calculate the length sum of direct io vectors that can
  39 * be combined into one page vector.
  40 */
  41static size_t dio_get_pagev_size(const struct iov_iter *it)
  42{
  43	const struct iovec *iov = it->iov;
  44	const struct iovec *iovend = iov + it->nr_segs;
  45	size_t size;
  46
  47	size = iov->iov_len - it->iov_offset;
  48	/*
  49	 * An iov can be page vectored when both the current tail
  50	 * and the next base are page aligned.
  51	 */
  52	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
  53	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
  54		size += iov->iov_len;
  55	}
  56	dout("dio_get_pagev_size len = %zu\n", size);
  57	return size;
  58}
  59
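/*
 * Worked example for dio_get_pagev_size() (hypothetical addresses,
 * 4K pages):
 *   { .iov_base = 0x1000, .iov_len = 0x2000 }  tail 0x3000 is aligned
 *   { .iov_base = 0x8000, .iov_len = 0x1000 }  base aligned -> merged
 *   { .iov_base = 0x9800, .iov_len = 0x0800 }  base unaligned -> stop
 * yields size = 0x3000: only the first two segments can share one
 * page vector.
 */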
  60/*
  61 * Allocate a page vector based on (@it, @nbytes).
  62 * The return value is the tuple describing a page vector,
  63 * that is (@pages, @page_align, @num_pages).
  64 */
  65static struct page **
  66dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
  67		    size_t *page_align, int *num_pages)
  68{
  69	struct iov_iter tmp_it = *it;
  70	size_t align;
  71	struct page **pages;
  72	int ret = 0, idx, npages;
  73
  74	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
  75		(PAGE_SIZE - 1);
  76	npages = calc_pages_for(align, nbytes);
  77	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
  78	if (!pages) {
  79		pages = vmalloc(sizeof(*pages) * npages);
  80		if (!pages)
  81			return ERR_PTR(-ENOMEM);
  82	}
  83
  84	for (idx = 0; idx < npages; ) {
  85		size_t start;
  86		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
  87					 npages - idx, &start);
  88		if (ret < 0)
  89			goto fail;
  90
  91		iov_iter_advance(&tmp_it, ret);
  92		nbytes -= ret;
  93		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
  94	}
  95
  96	BUG_ON(nbytes != 0);
  97	*num_pages = npages;
  98	*page_align = align;
  99	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
 100	return pages;
 101fail:
 102	ceph_put_page_vector(pages, idx, false);
 103	return ERR_PTR(ret);
 104}
 105
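/*
 * Worked example for the allocation above (hypothetical values,
 * 4K pages): align = 512 and nbytes = 8192 span bytes [512, 8704)
 * of the mapping, so calc_pages_for(512, 8192) == 3 and a 3-entry
 * page vector is allocated.
 */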
 106/*
 107 * Prepare an open request.  Preallocate ceph_cap to avoid an
 108 * inopportune ENOMEM later.
 109 */
 110static struct ceph_mds_request *
 111prepare_open_request(struct super_block *sb, int flags, int create_mode)
 112{
 113	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 114	struct ceph_mds_client *mdsc = fsc->mdsc;
 115	struct ceph_mds_request *req;
 116	int want_auth = USE_ANY_MDS;
 117	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 118
 119	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 120		want_auth = USE_AUTH_MDS;
 121
 122	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 123	if (IS_ERR(req))
 124		goto out;
 125	req->r_fmode = ceph_flags_to_mode(flags);
 126	req->r_args.open.flags = cpu_to_le32(flags);
 127	req->r_args.open.mode = cpu_to_le32(create_mode);
 128out:
 129	return req;
 130}
 131
 132/*
 133 * initialize private struct file data.
 134 * if we fail, clean up by dropping fmode reference on the ceph_inode
 135 */
 136static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 137{
 138	struct ceph_file_info *cf;
 139	int ret = 0;
 140	struct ceph_inode_info *ci = ceph_inode(inode);
 141	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 142	struct ceph_mds_client *mdsc = fsc->mdsc;
 143
 144	switch (inode->i_mode & S_IFMT) {
 145	case S_IFREG:
 146		/* The first file open request creates the cookie; we want to
 147		 * keep this cookie around for the lifetime of the inode so as
 148		 * not to have to worry about fscache register / revoke /
 149		 * operation races.
 150		 *
 151		 * Also, if we know the operation is going to invalidate data
 152		 * (non-readonly), just nuke the cache right away.
 153		 */
 154		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
 155		if ((fmode & CEPH_FILE_MODE_WR))
 156			ceph_fscache_invalidate(inode);
 157	case S_IFDIR:
 158		dout("init_file %p %p 0%o (regular)\n", inode, file,
 159		     inode->i_mode);
 160		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 161		if (cf == NULL) {
 162			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 163			return -ENOMEM;
 164		}
 165		cf->fmode = fmode;
 166		cf->next_offset = 2;
 167		cf->readdir_cache_idx = -1;
 168		file->private_data = cf;
 169		BUG_ON(inode->i_fop->release != ceph_release);
 170		break;
 171
 172	case S_IFLNK:
 173		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 174		     inode->i_mode);
 175		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 176		break;
 177
 178	default:
 179		dout("init_file %p %p 0%o (special)\n", inode, file,
 180		     inode->i_mode);
 181		/*
 182		 * we need to drop the open ref now, since we don't
 183		 * have .release set to ceph_release.
 184		 */
 185		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 186		BUG_ON(inode->i_fop->release == ceph_release);
 187
 188		/* call the proper open fop */
 189		ret = inode->i_fop->open(inode, file);
 190	}
 191	return ret;
 192}
 193
 194/*
 195 * If we already have the requisite capabilities, we can satisfy
 196 * the open request locally (no need to request new caps from the
 197 * MDS).  We do, however, need to inform the MDS (asynchronously)
 198 * if our wanted caps set expands.
 199 */
 200int ceph_open(struct inode *inode, struct file *file)
 201{
 202	struct ceph_inode_info *ci = ceph_inode(inode);
 203	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 204	struct ceph_mds_client *mdsc = fsc->mdsc;
 205	struct ceph_mds_request *req;
 206	struct ceph_file_info *cf = file->private_data;
 207	int err;
 208	int flags, fmode, wanted;
 209
 210	if (cf) {
 211		dout("open file %p is already opened\n", file);
 212		return 0;
 213	}
 214
 215	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 216	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 217	if (S_ISDIR(inode->i_mode))
 218		flags = O_DIRECTORY;  /* mds likes to know */
 219
 220	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 221	     ceph_vinop(inode), file, flags, file->f_flags);
 222	fmode = ceph_flags_to_mode(flags);
 223	wanted = ceph_caps_for_mode(fmode);
 224
 225	/* snapped files are read-only */
 226	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 227		return -EROFS;
 228
 229	/* trivially open snapdir */
 230	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 231		spin_lock(&ci->i_ceph_lock);
 232		__ceph_get_fmode(ci, fmode);
 233		spin_unlock(&ci->i_ceph_lock);
 234		return ceph_init_file(inode, file, fmode);
 235	}
 236
 237	/*
 238	 * No need to block if we have caps on the auth MDS (for
 239	 * write) or any MDS (for read).  Update wanted set
 240	 * asynchronously.
 241	 */
 242	spin_lock(&ci->i_ceph_lock);
 243	if (__ceph_is_any_real_caps(ci) &&
 244	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 245		int mds_wanted = __ceph_caps_mds_wanted(ci);
 246		int issued = __ceph_caps_issued(ci, NULL);
 247
 248		dout("open %p fmode %d want %s issued %s using existing\n",
 249		     inode, fmode, ceph_cap_string(wanted),
 250		     ceph_cap_string(issued));
 251		__ceph_get_fmode(ci, fmode);
 252		spin_unlock(&ci->i_ceph_lock);
 253
 254		/* adjust wanted? */
 255		if ((issued & wanted) != wanted &&
 256		    (mds_wanted & wanted) != wanted &&
 257		    ceph_snap(inode) != CEPH_SNAPDIR)
 258			ceph_check_caps(ci, 0, NULL);
 259
 260		return ceph_init_file(inode, file, fmode);
 261	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 262		   (ci->i_snap_caps & wanted) == wanted) {
 263		__ceph_get_fmode(ci, fmode);
 264		spin_unlock(&ci->i_ceph_lock);
 265		return ceph_init_file(inode, file, fmode);
 266	}
 267
 268	spin_unlock(&ci->i_ceph_lock);
 269
 270	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 271	req = prepare_open_request(inode->i_sb, flags, 0);
 272	if (IS_ERR(req)) {
 273		err = PTR_ERR(req);
 274		goto out;
 275	}
 276	req->r_inode = inode;
 277	ihold(inode);
 278
 279	req->r_num_caps = 1;
 280	err = ceph_mdsc_do_request(mdsc, NULL, req);
 281	if (!err)
 282		err = ceph_init_file(inode, file, req->r_fmode);
 283	ceph_mdsc_put_request(req);
 284	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 285out:
 286	return err;
 287}
 288
 289
 290/*
 291 * Do a lookup + open with a single request.  If we get a non-existent
 292 * file or symlink, return 1 so the VFS can retry.
 293 */
 294int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 295		     struct file *file, unsigned flags, umode_t mode,
 296		     int *opened)
 297{
 298	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 299	struct ceph_mds_client *mdsc = fsc->mdsc;
 300	struct ceph_mds_request *req;
 301	struct dentry *dn;
 302	struct ceph_acls_info acls = {};
 303	int mask;
 304	int err;
 305
 306	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 307	     dir, dentry, dentry,
 308	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 309
 310	if (dentry->d_name.len > NAME_MAX)
 311		return -ENAMETOOLONG;
 312
 313	err = ceph_init_dentry(dentry);
 314	if (err < 0)
 315		return err;
 316
 317	if (flags & O_CREAT) {
 318		err = ceph_pre_init_acls(dir, &mode, &acls);
 319		if (err < 0)
 320			return err;
 321	}
 322
 323	/* do the open */
 324	req = prepare_open_request(dir->i_sb, flags, mode);
 325	if (IS_ERR(req)) {
 326		err = PTR_ERR(req);
 327		goto out_acl;
 328	}
 329	req->r_dentry = dget(dentry);
 330	req->r_num_caps = 2;
 331	if (flags & O_CREAT) {
 332		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
 333		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 334		if (acls.pagelist) {
 335			req->r_pagelist = acls.pagelist;
 336			acls.pagelist = NULL;
 337		}
 338	}
 339
 340	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
 341	if (ceph_security_xattr_wanted(dir))
 342		mask |= CEPH_CAP_XATTR_SHARED;
 343	req->r_args.open.mask = cpu_to_le32(mask);
 344
 345	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
 346	err = ceph_mdsc_do_request(mdsc,
 347				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 348				   req);
 349	err = ceph_handle_snapdir(req, dentry, err);
 350	if (err)
 351		goto out_req;
 352
 353	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 354		err = ceph_handle_notrace_create(dir, dentry);
 355
 356	if (d_unhashed(dentry)) {
 357		dn = ceph_finish_lookup(req, dentry, err);
 358		if (IS_ERR(dn))
 359			err = PTR_ERR(dn);
 360	} else {
 361		/* we were given a hashed negative dentry */
 362		dn = NULL;
 363	}
 364	if (err)
 365		goto out_req;
 366	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 367		/* make vfs retry on splice, ENOENT, or symlink */
 368		dout("atomic_open finish_no_open on dn %p\n", dn);
 369		err = finish_no_open(file, dn);
 370	} else {
 371		dout("atomic_open finish_open on dn %p\n", dn);
 372		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 373			ceph_init_inode_acls(d_inode(dentry), &acls);
 374			*opened |= FILE_CREATED;
 375		}
 376		err = finish_open(file, dentry, ceph_open, opened);
 377	}
 378out_req:
 379	if (!req->r_err && req->r_target_inode)
 380		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
 381	ceph_mdsc_put_request(req);
 382out_acl:
 383	ceph_release_acls_info(&acls);
 384	dout("atomic_open result=%d\n", err);
 385	return err;
 386}
 387
 388int ceph_release(struct inode *inode, struct file *file)
 389{
 390	struct ceph_inode_info *ci = ceph_inode(inode);
 391	struct ceph_file_info *cf = file->private_data;
 392
 393	dout("release inode %p file %p\n", inode, file);
 394	ceph_put_fmode(ci, cf->fmode);
 395	if (cf->last_readdir)
 396		ceph_mdsc_put_request(cf->last_readdir);
 397	kfree(cf->last_name);
 398	kfree(cf->dir_info);
 399	kmem_cache_free(ceph_file_cachep, cf);
 400
 401	/* wake up anyone waiting for caps on this inode */
 402	wake_up_all(&ci->i_cap_wq);
 403	return 0;
 404}
 405
 406enum {
 407	HAVE_RETRIED = 1,
 408	CHECK_EOF =    2,
 409	READ_INLINE =  3,
 410};
 411
 412/*
 413 * Read a range of bytes striped over one or more objects.  Iterate over
 414 * objects we stripe over.  (That's not atomic, but good enough for now.)
 415 *
 416 * If we get a short result from the OSD, check against i_size; we only
 417 * want to return a short read to the caller if we actually hit EOF.
 418 */
 419static int striped_read(struct inode *inode,
 420			u64 off, u64 len,
 421			struct page **pages, int num_pages,
 422			int *checkeof)
 423{
 424	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 425	struct ceph_inode_info *ci = ceph_inode(inode);
 426	u64 pos, this_len, left;
 427	loff_t i_size;
 428	int page_align, pages_left;
 429	int read, ret;
 430	struct page **page_pos;
 431	bool hit_stripe, was_short;
 432
 433	/*
 434	 * we may need to do multiple reads.  not atomic, unfortunately.
 435	 */
 436	pos = off;
 437	left = len;
 438	page_pos = pages;
 439	pages_left = num_pages;
 440	read = 0;
 441
 442more:
 443	page_align = pos & ~PAGE_MASK;
 444	this_len = left;
 445	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
 446				  &ci->i_layout, pos, &this_len,
 447				  ci->i_truncate_seq,
 448				  ci->i_truncate_size,
 449				  page_pos, pages_left, page_align);
 450	if (ret == -ENOENT)
 451		ret = 0;
 452	hit_stripe = this_len < left;
 453	was_short = ret >= 0 && ret < this_len;
 454	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
 455	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 456
 457	i_size = i_size_read(inode);
 458	if (ret >= 0) {
 459		int didpages;
 460		if (was_short && (pos + ret < i_size)) {
 461			int zlen = min(this_len - ret, i_size - pos - ret);
 462			int zoff = (off & ~PAGE_MASK) + read + ret;
 463			dout(" zero gap %llu to %llu\n",
 464				pos + ret, pos + ret + zlen);
 465			ceph_zero_page_vector_range(zoff, zlen, pages);
 466			ret += zlen;
 467		}
 468
 469		didpages = (page_align + ret) >> PAGE_SHIFT;
 470		pos += ret;
 471		read = pos - off;
 472		left -= ret;
 473		page_pos += didpages;
 474		pages_left -= didpages;
 475
 476		/* hit a stripe boundary and need to continue */
 477		if (left && hit_stripe && pos < i_size)
 478			goto more;
 479	}
 480
 481	if (read > 0) {
 482		ret = read;
 483		/* did we bounce off eof? */
 484		if (pos + left > i_size)
 485			*checkeof = CHECK_EOF;
 486	}
 487
 488	dout("striped_read returns %d\n", ret);
 489	return ret;
 490}
 491
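/*
 * Worked example for the short-read handling above (hypothetical
 * sizes): reading 8192 bytes at pos 0 from an object that holds only
 * 4096 bytes, with i_size = 6144, returns ret = 4096 (SHORT). Since
 * pos + ret < i_size, the gap [4096, 6144) is zero-filled and ret
 * becomes 6144, so the caller only sees a short read at true EOF.
 */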
 492/*
 493 * Completely synchronous read and write methods.  Direct from __user
 494 * buffer to osd, or directly to user pages (if O_DIRECT).
 495 *
 496 * If the read spans object boundary, just do multiple reads.
 497 */
 498static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
 499				int *checkeof)
 500{
 501	struct file *file = iocb->ki_filp;
 502	struct inode *inode = file_inode(file);
 503	struct page **pages;
 504	u64 off = iocb->ki_pos;
 505	int num_pages, ret;
 506	size_t len = iov_iter_count(i);
 507
 508	dout("sync_read on file %p %llu~%u %s\n", file, off,
 509	     (unsigned)len,
 510	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 511
 512	if (!len)
 513		return 0;
 514	/*
 515	 * flush any page cache pages in this range.  this
 516	 * will make concurrent normal and sync io slow,
 517	 * but it will at least behave sensibly when they are
 518	 * in sequence.
 519	 */
 520	ret = filemap_write_and_wait_range(inode->i_mapping, off,
 521						off + len);
 522	if (ret < 0)
 523		return ret;
 524
 525	num_pages = calc_pages_for(off, len);
 526	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 527	if (IS_ERR(pages))
 528		return PTR_ERR(pages);
 529	ret = striped_read(inode, off, len, pages,
 530				num_pages, checkeof);
 531	if (ret > 0) {
 532		int l, k = 0;
 533		size_t left = ret;
 534
 535		while (left) {
 536			size_t page_off = off & ~PAGE_MASK;
 537			size_t copy = min_t(size_t, left,
 538					    PAGE_SIZE - page_off);
 539			l = copy_page_to_iter(pages[k++], page_off, copy, i);
 540			off += l;
 541			left -= l;
 542			if (l < copy)
 543				break;
 544		}
 545	}
 546	ceph_release_page_vector(pages, num_pages);
 547
 548	if (off > iocb->ki_pos) {
 549		ret = off - iocb->ki_pos;
 550		iocb->ki_pos = off;
 551	}
 552
 553	dout("sync_read result %d\n", ret);
 554	return ret;
 555}
 556
 557struct ceph_aio_request {
 558	struct kiocb *iocb;
 559	size_t total_len;
 560	int write;
 561	int error;
 562	struct list_head osd_reqs;
 563	unsigned num_reqs;
 564	atomic_t pending_reqs;
 565	struct timespec mtime;
 566	struct ceph_cap_flush *prealloc_cf;
 567};
 568
 569struct ceph_aio_work {
 570	struct work_struct work;
 571	struct ceph_osd_request *req;
 572};
 573
 574static void ceph_aio_retry_work(struct work_struct *work);
 575
 576static void ceph_aio_complete(struct inode *inode,
 577			      struct ceph_aio_request *aio_req)
 578{
 579	struct ceph_inode_info *ci = ceph_inode(inode);
 580	int ret;
 581
 582	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 583		return;
 584
 585	ret = aio_req->error;
 586	if (!ret)
 587		ret = aio_req->total_len;
 588
 589	dout("ceph_aio_complete %p rc %d\n", inode, ret);
 590
 591	if (ret >= 0 && aio_req->write) {
 592		int dirty;
 593
 594		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
 595		if (endoff > i_size_read(inode)) {
 596			if (ceph_inode_set_size(inode, endoff))
 597				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 598		}
 599
 600		spin_lock(&ci->i_ceph_lock);
 601		ci->i_inline_version = CEPH_INLINE_NONE;
 602		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
 603					       &aio_req->prealloc_cf);
 604		spin_unlock(&ci->i_ceph_lock);
 605		if (dirty)
 606			__mark_inode_dirty(inode, dirty);
 607
 608	}
 609
 610	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 611						CEPH_CAP_FILE_RD));
 612
 613	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
 614
 615	ceph_free_cap_flush(aio_req->prealloc_cf);
 616	kfree(aio_req);
 617}
 618
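/*
 * Descriptive note (not in the original source): the submitter in
 * ceph_direct_read_write() bumps pending_reqs once per OSD request;
 * each completion funnels into ceph_aio_complete(), and only the final
 * atomic_dec_and_test() above reports the aggregate result to the
 * caller's kiocb.
 */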
 619static void ceph_aio_complete_req(struct ceph_osd_request *req,
 620				  struct ceph_msg *msg)
 621{
 622	int rc = req->r_result;
 623	struct inode *inode = req->r_inode;
 624	struct ceph_aio_request *aio_req = req->r_priv;
 625	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 626	int num_pages = calc_pages_for((u64)osd_data->alignment,
 627				       osd_data->length);
 628
 629	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
 630	     inode, rc, osd_data->length);
 631
 632	if (rc == -EOLDSNAPC) {
 633		struct ceph_aio_work *aio_work;
 634		BUG_ON(!aio_req->write);
 635
 636		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 637		if (aio_work) {
 638			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 639			aio_work->req = req;
 640			queue_work(ceph_inode_to_client(inode)->wb_wq,
 641				   &aio_work->work);
 642			return;
 643		}
 644		rc = -ENOMEM;
 645	} else if (!aio_req->write) {
 646		if (rc == -ENOENT)
 647			rc = 0;
 648		if (rc >= 0 && osd_data->length > rc) {
 649			int zoff = osd_data->alignment + rc;
 650			int zlen = osd_data->length - rc;
 651			/*
 652			 * If the read is satisfied by a single OSD
 653			 * request, it can extend past EOF. Otherwise
 654			 * the read is within i_size.
 655			 */
 656			if (aio_req->num_reqs == 1) {
 657				loff_t i_size = i_size_read(inode);
 658				loff_t endoff = aio_req->iocb->ki_pos + rc;
 659				if (endoff < i_size)
 660					zlen = min_t(size_t, zlen,
 661						     i_size - endoff);
 662				aio_req->total_len = rc + zlen;
 663			}
 664
 665			if (zlen > 0)
 666				ceph_zero_page_vector_range(zoff, zlen,
 667							    osd_data->pages);
 668		}
 669	}
 670
 671	ceph_put_page_vector(osd_data->pages, num_pages, false);
 672	ceph_osdc_put_request(req);
 673
 674	if (rc < 0)
 675		cmpxchg(&aio_req->error, 0, rc);
 676
 677	ceph_aio_complete(inode, aio_req);
 678	return;
 679}
 680
 681static void ceph_aio_retry_work(struct work_struct *work)
 682{
 683	struct ceph_aio_work *aio_work =
 684		container_of(work, struct ceph_aio_work, work);
 685	struct ceph_osd_request *orig_req = aio_work->req;
 686	struct ceph_aio_request *aio_req = orig_req->r_priv;
 687	struct inode *inode = orig_req->r_inode;
 688	struct ceph_inode_info *ci = ceph_inode(inode);
 689	struct ceph_snap_context *snapc;
 690	struct ceph_osd_request *req;
 691	int ret;
 692
 693	spin_lock(&ci->i_ceph_lock);
 694	if (__ceph_have_pending_cap_snap(ci)) {
 695		struct ceph_cap_snap *capsnap =
 696			list_last_entry(&ci->i_cap_snaps,
 697					struct ceph_cap_snap,
 698					ci_item);
 699		snapc = ceph_get_snap_context(capsnap->context);
 700	} else {
 701		BUG_ON(!ci->i_head_snapc);
 702		snapc = ceph_get_snap_context(ci->i_head_snapc);
 703	}
 704	spin_unlock(&ci->i_ceph_lock);
 705
 706	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
 707			false, GFP_NOFS);
 708	if (!req) {
 709		ret = -ENOMEM;
 710		req = orig_req;
 711		goto out;
 712	}
 713
 714	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
 715			CEPH_OSD_FLAG_ONDISK |
 716			CEPH_OSD_FLAG_WRITE;
 717	req->r_base_oloc = orig_req->r_base_oloc;
 718	req->r_base_oid = orig_req->r_base_oid;
 719
 720	req->r_ops[0] = orig_req->r_ops[0];
 721	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 722
 723	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
 724				snapc, CEPH_NOSNAP, &aio_req->mtime);
 725
 726	ceph_osdc_put_request(orig_req);
 727
 728	req->r_callback = ceph_aio_complete_req;
 729	req->r_inode = inode;
 730	req->r_priv = aio_req;
 731
 732	ret = ceph_osdc_start_request(req->r_osdc, req, false);
 733out:
 734	if (ret < 0) {
 735		req->r_result = ret;
 736		ceph_aio_complete_req(req, NULL);
 737	}
 738
 739	ceph_put_snap_context(snapc);
 740	kfree(aio_work);
 741}
 742
 743/*
 744 * Write commit request unsafe callback, called to tell us when a
 745 * request is unsafe (that is, in flight--has been handed to the
 746 * messenger to send to its target osd).  It is called again when
 747 * we've received a response message indicating the request is
 748 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 749 * is completed early (and unsuccessfully) due to a timeout or
 750 * interrupt.
 751 *
 752 * This is used if we requested both an ACK and ONDISK commit reply
 753 * from the OSD.
 754 */
 755static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
 756{
 757	struct ceph_inode_info *ci = ceph_inode(req->r_inode);
 758
 759	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
 760		unsafe ? "un" : "");
 761	if (unsafe) {
 762		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
 763		spin_lock(&ci->i_unsafe_lock);
 764		list_add_tail(&req->r_unsafe_item,
 765			      &ci->i_unsafe_writes);
 766		spin_unlock(&ci->i_unsafe_lock);
 767	} else {
 768		spin_lock(&ci->i_unsafe_lock);
 769		list_del_init(&req->r_unsafe_item);
 770		spin_unlock(&ci->i_unsafe_lock);
 771		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
 772	}
 773}
 774
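/*
 * Sketch of the resulting sequence for one write (descriptive only):
 *   ceph_osdc_start_request()
 *     -> ceph_sync_write_unsafe(req, true)   in flight: take Fw cap
 *                                            ref, track on unsafe list
 *   ... OSD replies with ONDISK ...
 *     -> ceph_sync_write_unsafe(req, false)  untrack, drop Fw cap ref
 */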
 775
 776static ssize_t
 777ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 778		       struct ceph_snap_context *snapc,
 779		       struct ceph_cap_flush **pcf)
 780{
 781	struct file *file = iocb->ki_filp;
 782	struct inode *inode = file_inode(file);
 783	struct ceph_inode_info *ci = ceph_inode(inode);
 784	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 785	struct ceph_vino vino;
 786	struct ceph_osd_request *req;
 787	struct page **pages;
 788	struct ceph_aio_request *aio_req = NULL;
 789	int num_pages = 0;
 790	int flags;
 791	int ret;
 792	struct timespec mtime = current_fs_time(inode->i_sb);
 793	size_t count = iov_iter_count(iter);
 794	loff_t pos = iocb->ki_pos;
 795	bool write = iov_iter_rw(iter) == WRITE;
 796
 797	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 798		return -EROFS;
 799
 800	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
 801	     (write ? "write" : "read"), file, pos, (unsigned)count);
 802
 803	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
 804	if (ret < 0)
 805		return ret;
 806
 807	if (write) {
 808		ret = invalidate_inode_pages2_range(inode->i_mapping,
 809					pos >> PAGE_SHIFT,
 810					(pos + count) >> PAGE_SHIFT);
 811		if (ret < 0)
 812			dout("invalidate_inode_pages2_range returned %d\n", ret);
 813
 814		flags = CEPH_OSD_FLAG_ORDERSNAP |
 815			CEPH_OSD_FLAG_ONDISK |
 816			CEPH_OSD_FLAG_WRITE;
 817	} else {
 818		flags = CEPH_OSD_FLAG_READ;
 819	}
 820
 821	while (iov_iter_count(iter) > 0) {
 822		u64 size = dio_get_pagev_size(iter);
 823		size_t start = 0;
 824		ssize_t len;
 825
 826		vino = ceph_vino(inode);
 827		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 828					    vino, pos, &size, 0,
 829					    /*include a 'startsync' command*/
 830					    write ? 2 : 1,
 831					    write ? CEPH_OSD_OP_WRITE :
 832						    CEPH_OSD_OP_READ,
 833					    flags, snapc,
 834					    ci->i_truncate_seq,
 835					    ci->i_truncate_size,
 836					    false);
 837		if (IS_ERR(req)) {
 838			ret = PTR_ERR(req);
 839			break;
 840		}
 841
 842		len = size;
 843		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
 844		if (IS_ERR(pages)) {
 845			ceph_osdc_put_request(req);
 846			ret = PTR_ERR(pages);
 847			break;
 848		}
 849
 850		/*
 851		 * To simplify error handling, only allow AIO when the IO is
 852		 * within i_size or can be satisfied by a single OSD request.
 853		 */
 854		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
 855		    (len == count || pos + count <= i_size_read(inode))) {
 856			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
 857			if (aio_req) {
 858				aio_req->iocb = iocb;
 859				aio_req->write = write;
 860				INIT_LIST_HEAD(&aio_req->osd_reqs);
 861				if (write) {
 862					aio_req->mtime = mtime;
 863					swap(aio_req->prealloc_cf, *pcf);
 864				}
 865			}
 866			/* ignore error */
 867		}
 868
 869		if (write) {
 870			/*
 871			 * throw out any page cache pages in this range. this
 872			 * may block.
 873			 */
 874			truncate_inode_pages_range(inode->i_mapping, pos,
 875					(pos+len) | (PAGE_SIZE - 1));
 876
 877			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 878		}
 879
 880
 881		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
 882						 false, false);
 883
 884		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
 885
 886		if (aio_req) {
 887			aio_req->total_len += len;
 888			aio_req->num_reqs++;
 889			atomic_inc(&aio_req->pending_reqs);
 890
 891			req->r_callback = ceph_aio_complete_req;
 892			req->r_inode = inode;
 893			req->r_priv = aio_req;
 894			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
 895
 896			pos += len;
 897			iov_iter_advance(iter, len);
 898			continue;
 899		}
 900
 901		ret = ceph_osdc_start_request(req->r_osdc, req, false);
 902		if (!ret)
 903			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 904
 905		size = i_size_read(inode);
 906		if (!write) {
 907			if (ret == -ENOENT)
 908				ret = 0;
 909			if (ret >= 0 && ret < len && pos + ret < size) {
 910				int zlen = min_t(size_t, len - ret,
 911						 size - pos - ret);
 912				ceph_zero_page_vector_range(start + ret, zlen,
 913							    pages);
 914				ret += zlen;
 915			}
 916			if (ret >= 0)
 917				len = ret;
 918		}
 919
 920		ceph_put_page_vector(pages, num_pages, false);
 921
 922		ceph_osdc_put_request(req);
 923		if (ret < 0)
 924			break;
 925
 926		pos += len;
 927		iov_iter_advance(iter, len);
 928
 929		if (!write && pos >= size)
 930			break;
 931
 932		if (write && pos > size) {
 933			if (ceph_inode_set_size(inode, pos))
 934				ceph_check_caps(ceph_inode(inode),
 935						CHECK_CAPS_AUTHONLY,
 936						NULL);
 937		}
 938	}
 939
 940	if (aio_req) {
 941		if (aio_req->num_reqs == 0) {
 942			kfree(aio_req);
 943			return ret;
 944		}
 945
 946		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
 947					      CEPH_CAP_FILE_RD);
 948
 949		while (!list_empty(&aio_req->osd_reqs)) {
 950			req = list_first_entry(&aio_req->osd_reqs,
 951					       struct ceph_osd_request,
 952					       r_unsafe_item);
 953			list_del_init(&req->r_unsafe_item);
 954			if (ret >= 0)
 955				ret = ceph_osdc_start_request(req->r_osdc,
 956							      req, false);
 957			if (ret < 0) {
 958				req->r_result = ret;
 959				ceph_aio_complete_req(req, NULL);
 960			}
 961		}
 962		return -EIOCBQUEUED;
 963	}
 964
 965	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
 966		ret = pos - iocb->ki_pos;
 967		iocb->ki_pos = pos;
 968	}
 969	return ret;
 970}
 971
 972/*
 973 * Synchronous write, straight from __user pointer or user pages.
 974 *
 975 * If write spans object boundary, just do multiple writes.  (For a
 976 * correct atomic write, we should e.g. take write locks on all
 977 * objects, rollback on failure, etc.)
 978 */
 979static ssize_t
 980ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 981		struct ceph_snap_context *snapc)
 982{
 983	struct file *file = iocb->ki_filp;
 984	struct inode *inode = file_inode(file);
 985	struct ceph_inode_info *ci = ceph_inode(inode);
 986	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 987	struct ceph_vino vino;
 988	struct ceph_osd_request *req;
 989	struct page **pages;
 990	u64 len;
 991	int num_pages;
 992	int written = 0;
 993	int flags;
 994	int check_caps = 0;
 995	int ret;
 996	struct timespec mtime = current_fs_time(inode->i_sb);
 997	size_t count = iov_iter_count(from);
 998
 999	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1000		return -EROFS;
1001
1002	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
1003
1004	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
1005	if (ret < 0)
1006		return ret;
1007
1008	ret = invalidate_inode_pages2_range(inode->i_mapping,
1009					    pos >> PAGE_SHIFT,
1010					    (pos + count) >> PAGE_SHIFT);
1011	if (ret < 0)
1012		dout("invalidate_inode_pages2_range returned %d\n", ret);
1013
1014	flags = CEPH_OSD_FLAG_ORDERSNAP |
1015		CEPH_OSD_FLAG_ONDISK |
1016		CEPH_OSD_FLAG_WRITE |
1017		CEPH_OSD_FLAG_ACK;
1018
1019	while ((len = iov_iter_count(from)) > 0) {
1020		size_t left;
1021		int n;
1022
1023		vino = ceph_vino(inode);
1024		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1025					    vino, pos, &len, 0, 1,
1026					    CEPH_OSD_OP_WRITE, flags, snapc,
1027					    ci->i_truncate_seq,
1028					    ci->i_truncate_size,
1029					    false);
1030		if (IS_ERR(req)) {
1031			ret = PTR_ERR(req);
1032			break;
1033		}
1034
1035		/*
1036		 * write from beginning of first page,
1037		 * regardless of io alignment
1038		 */
1039		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1040
1041		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1042		if (IS_ERR(pages)) {
1043			ret = PTR_ERR(pages);
1044			goto out;
1045		}
1046
1047		left = len;
1048		for (n = 0; n < num_pages; n++) {
1049			size_t plen = min_t(size_t, left, PAGE_SIZE);
1050			ret = copy_page_from_iter(pages[n], 0, plen, from);
1051			if (ret != plen) {
1052				ret = -EFAULT;
1053				break;
1054			}
1055			left -= ret;
1056		}
1057
1058		if (ret < 0) {
1059			ceph_release_page_vector(pages, num_pages);
1060			goto out;
1061		}
1062
1063		/* get a second commit callback */
1064		req->r_unsafe_callback = ceph_sync_write_unsafe;
1065		req->r_inode = inode;
1066
1067		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1068						false, true);
1069
1070		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
1071		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
1072
1073		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1074		if (!ret)
1075			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1076
1077out:
1078		ceph_osdc_put_request(req);
1079		if (ret == 0) {
1080			pos += len;
1081			written += len;
1082
1083			if (pos > i_size_read(inode)) {
1084				check_caps = ceph_inode_set_size(inode, pos);
1085				if (check_caps)
1086					ceph_check_caps(ceph_inode(inode),
1087							CHECK_CAPS_AUTHONLY,
1088							NULL);
1089			}
1090		} else
1091			break;
1092	}
1093
1094	if (ret != -EOLDSNAPC && written > 0) {
1095		ret = written;
1096		iocb->ki_pos = pos;
1097	}
1098	return ret;
1099}
1100
1101/*
1102 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1103 * Atomically grab references, so that those bits are not released
1104 * back to the MDS mid-read.
1105 *
1106 * Hmm, the sync read case isn't actually async... should it be?
1107 */
1108static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1109{
1110	struct file *filp = iocb->ki_filp;
1111	struct ceph_file_info *fi = filp->private_data;
1112	size_t len = iov_iter_count(to);
1113	struct inode *inode = file_inode(filp);
1114	struct ceph_inode_info *ci = ceph_inode(inode);
1115	struct page *pinned_page = NULL;
1116	ssize_t ret;
1117	int want, got = 0;
1118	int retry_op = 0, read = 0;
1119
1120again:
1121	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1122	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1123
1124	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1125		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1126	else
1127		want = CEPH_CAP_FILE_CACHE;
1128	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1129	if (ret < 0)
1130		return ret;
1131
1132	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1133	    (iocb->ki_flags & IOCB_DIRECT) ||
1134	    (fi->flags & CEPH_F_SYNC)) {
1135
1136		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1137		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1138		     ceph_cap_string(got));
1139
1140		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1141			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1142				ret = ceph_direct_read_write(iocb, to,
1143							     NULL, NULL);
1144				if (ret >= 0 && ret < len)
1145					retry_op = CHECK_EOF;
1146			} else {
1147				ret = ceph_sync_read(iocb, to, &retry_op);
1148			}
1149		} else {
1150			retry_op = READ_INLINE;
1151		}
1152	} else {
1153		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1154		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1155		     ceph_cap_string(got));
1156
1157		ret = generic_file_read_iter(iocb, to);
1158	}
1159	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1160	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1161	if (pinned_page) {
1162		put_page(pinned_page);
1163		pinned_page = NULL;
1164	}
1165	ceph_put_cap_refs(ci, got);
1166	if (retry_op > HAVE_RETRIED && ret >= 0) {
1167		int statret;
1168		struct page *page = NULL;
1169		loff_t i_size;
1170		if (retry_op == READ_INLINE) {
1171			page = __page_cache_alloc(GFP_KERNEL);
1172			if (!page)
1173				return -ENOMEM;
1174		}
1175
1176		statret = __ceph_do_getattr(inode, page,
1177					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1178		if (statret < 0) {
1179			 __free_page(page);
1180			if (statret == -ENODATA) {
1181				BUG_ON(retry_op != READ_INLINE);
1182				goto again;
1183			}
1184			return statret;
1185		}
1186
1187		i_size = i_size_read(inode);
1188		if (retry_op == READ_INLINE) {
1189			BUG_ON(ret > 0 || read > 0);
1190			if (iocb->ki_pos < i_size &&
1191			    iocb->ki_pos < PAGE_SIZE) {
1192				loff_t end = min_t(loff_t, i_size,
1193						   iocb->ki_pos + len);
1194				end = min_t(loff_t, end, PAGE_SIZE);
1195				if (statret < end)
1196					zero_user_segment(page, statret, end);
1197				ret = copy_page_to_iter(page,
1198						iocb->ki_pos & ~PAGE_MASK,
1199						end - iocb->ki_pos, to);
1200				iocb->ki_pos += ret;
1201				read += ret;
1202			}
1203			if (iocb->ki_pos < i_size && read < len) {
1204				size_t zlen = min_t(size_t, len - read,
1205						    i_size - iocb->ki_pos);
1206				ret = iov_iter_zero(zlen, to);
1207				iocb->ki_pos += ret;
1208				read += ret;
1209			}
1210			__free_pages(page, 0);
1211			return read;
1212		}
1213
1214		/* hit EOF or hole? */
1215		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1216		    ret < len) {
1217			dout("sync_read hit hole, ppos %lld < size %lld"
1218			     ", reading more\n", iocb->ki_pos, i_size);
1219
1220			read += ret;
1221			len -= ret;
1222			retry_op = HAVE_RETRIED;
1223			goto again;
1224		}
1225	}
1226
1227	if (ret >= 0)
1228		ret += read;
1229
1230	return ret;
1231}
1232
1233/*
1234 * Take cap references to avoid releasing caps to MDS mid-write.
1235 *
1236 * If we are synchronous, and write with an old snap context, the OSD
1237 * may return EOLDSNAPC.  In that case, retry the write.. _after_
1238 * dropping our cap refs and allowing the pending snap to logically
1239 * complete _before_ this write occurs.
1240 *
1241 * If we are near ENOSPC, write synchronously.
1242 */
1243static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1244{
1245	struct file *file = iocb->ki_filp;
1246	struct ceph_file_info *fi = file->private_data;
1247	struct inode *inode = file_inode(file);
1248	struct ceph_inode_info *ci = ceph_inode(inode);
1249	struct ceph_osd_client *osdc =
1250		&ceph_sb_to_client(inode->i_sb)->client->osdc;
1251	struct ceph_cap_flush *prealloc_cf;
1252	ssize_t count, written = 0;
1253	int err, want, got;
1254	loff_t pos;
1255
1256	if (ceph_snap(inode) != CEPH_NOSNAP)
1257		return -EROFS;
1258
1259	prealloc_cf = ceph_alloc_cap_flush();
1260	if (!prealloc_cf)
1261		return -ENOMEM;
1262
1263	inode_lock(inode);
1264
1265	/* We can write back this queue in page reclaim */
1266	current->backing_dev_info = inode_to_bdi(inode);
1267
1268	if (iocb->ki_flags & IOCB_APPEND) {
1269		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1270		if (err < 0)
1271			goto out;
1272	}
1273
1274	err = generic_write_checks(iocb, from);
1275	if (err <= 0)
1276		goto out;
1277
1278	pos = iocb->ki_pos;
1279	count = iov_iter_count(from);
1280	err = file_remove_privs(file);
1281	if (err)
1282		goto out;
1283
1284	err = file_update_time(file);
1285	if (err)
1286		goto out;
1287
1288	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1289		err = ceph_uninline_data(file, NULL);
1290		if (err < 0)
1291			goto out;
1292	}
1293
1294retry_snap:
1295	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
1296		err = -ENOSPC;
1297		goto out;
1298	}
1299
1300	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1301	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1302	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1303		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1304	else
1305		want = CEPH_CAP_FILE_BUFFER;
1306	got = 0;
1307	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1308			    &got, NULL);
1309	if (err < 0)
1310		goto out;
1311
1312	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1313	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1314
1315	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1316	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
1317		struct ceph_snap_context *snapc;
1318		struct iov_iter data;
1319		inode_unlock(inode);
1320
1321		spin_lock(&ci->i_ceph_lock);
1322		if (__ceph_have_pending_cap_snap(ci)) {
1323			struct ceph_cap_snap *capsnap =
1324					list_last_entry(&ci->i_cap_snaps,
1325							struct ceph_cap_snap,
1326							ci_item);
1327			snapc = ceph_get_snap_context(capsnap->context);
1328		} else {
1329			BUG_ON(!ci->i_head_snapc);
1330			snapc = ceph_get_snap_context(ci->i_head_snapc);
1331		}
1332		spin_unlock(&ci->i_ceph_lock);
1333
1334		/* we might need to revert back to that point */
1335		data = *from;
1336		if (iocb->ki_flags & IOCB_DIRECT)
1337			written = ceph_direct_read_write(iocb, &data, snapc,
1338							 &prealloc_cf);
1339		else
1340			written = ceph_sync_write(iocb, &data, pos, snapc);
1341		if (written == -EOLDSNAPC) {
1342			dout("aio_write %p %llx.%llx %llu~%u "
1343				"got EOLDSNAPC, retrying\n",
1344				inode, ceph_vinop(inode),
1345				pos, (unsigned)count);
1346			inode_lock(inode);
1347			goto retry_snap;
1348		}
1349		if (written > 0)
1350			iov_iter_advance(from, written);
1351		ceph_put_snap_context(snapc);
1352	} else {
1353		loff_t old_size = i_size_read(inode);
1354		/*
1355		 * No need to acquire the i_truncate_mutex, because the
1356		 * MDS revokes Fwb caps before sending a truncate message
1357		 * to us. We can't hold the Fwb cap while there is a
1358		 * pending vmtruncate, so write and vmtruncate cannot run
1359		 * at the same time.
1360		 */
1361		written = generic_perform_write(file, from, pos);
1362		if (likely(written >= 0))
1363			iocb->ki_pos = pos + written;
1364		if (i_size_read(inode) > old_size)
1365			ceph_fscache_update_objectsize(inode);
1366		inode_unlock(inode);
1367	}
1368
1369	if (written >= 0) {
1370		int dirty;
1371		spin_lock(&ci->i_ceph_lock);
1372		ci->i_inline_version = CEPH_INLINE_NONE;
1373		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1374					       &prealloc_cf);
1375		spin_unlock(&ci->i_ceph_lock);
1376		if (dirty)
1377			__mark_inode_dirty(inode, dirty);
1378	}
1379
1380	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1381	     inode, ceph_vinop(inode), pos, (unsigned)count,
1382	     ceph_cap_string(got));
1383	ceph_put_cap_refs(ci, got);
1384
1385	if (written >= 0 &&
1386	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
1387	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
1388		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
1389		if (err < 0)
1390			written = err;
1391	}
1392
1393	goto out_unlocked;
1394
1395out:
1396	inode_unlock(inode);
1397out_unlocked:
1398	ceph_free_cap_flush(prealloc_cf);
1399	current->backing_dev_info = NULL;
1400	return written ? written : err;
1401}
1402
1403/*
1404 * llseek.  be sure to verify file size on SEEK_END.
1405 */
1406static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1407{
1408	struct inode *inode = file->f_mapping->host;
1409	loff_t i_size;
1410	int ret;
1411
1412	inode_lock(inode);
1413
1414	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1415		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1416		if (ret < 0) {
1417			offset = ret;
1418			goto out;
1419		}
1420	}
1421
1422	i_size = i_size_read(inode);
1423	switch (whence) {
1424	case SEEK_END:
1425		offset += i_size;
1426		break;
1427	case SEEK_CUR:
1428		/*
1429		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1430		 * position-querying operation.  Avoid rewriting the "same"
1431		 * f_pos value back to the file because a concurrent read(),
1432		 * write() or lseek() might have altered it
1433		 */
1434		if (offset == 0) {
1435			offset = file->f_pos;
1436			goto out;
1437		}
1438		offset += file->f_pos;
1439		break;
1440	case SEEK_DATA:
1441		if (offset >= i_size) {
1442			ret = -ENXIO;
1443			goto out;
1444		}
1445		break;
1446	case SEEK_HOLE:
1447		if (offset >= i_size) {
1448			ret = -ENXIO;
1449			goto out;
1450		}
1451		offset = i_size;
1452		break;
1453	}
1454
1455	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1456
1457out:
1458	inode_unlock(inode);
1459	return offset;
1460}
1461
1462static inline void ceph_zero_partial_page(
1463	struct inode *inode, loff_t offset, unsigned size)
1464{
1465	struct page *page;
1466	pgoff_t index = offset >> PAGE_SHIFT;
1467
1468	page = find_lock_page(inode->i_mapping, index);
1469	if (page) {
1470		wait_on_page_writeback(page);
1471		zero_user(page, offset & (PAGE_SIZE - 1), size);
1472		unlock_page(page);
1473		put_page(page);
1474	}
1475}
1476
1477static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1478				      loff_t length)
1479{
1480	loff_t nearly = round_up(offset, PAGE_SIZE);
1481	if (offset < nearly) {
1482		loff_t size = nearly - offset;
1483		if (length < size)
1484			size = length;
1485		ceph_zero_partial_page(inode, offset, size);
1486		offset += size;
1487		length -= size;
1488	}
1489	if (length >= PAGE_SIZE) {
1490		loff_t size = round_down(length, PAGE_SIZE);
1491		truncate_pagecache_range(inode, offset, offset + size - 1);
1492		offset += size;
1493		length -= size;
1494	}
1495	if (length)
1496		ceph_zero_partial_page(inode, offset, length);
1497}
1498
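/*
 * Worked example for ceph_zero_pagecache_range() (hypothetical values,
 * 4K pages): offset = 1000, length = 10000 is handled in three steps:
 *   head  [1000, 4096)   ceph_zero_partial_page, 3096 bytes
 *   mid   [4096, 8192)   truncate_pagecache_range, one whole page
 *   tail  [8192, 11000)  ceph_zero_partial_page, 2808 bytes
 */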
1499static int ceph_zero_partial_object(struct inode *inode,
1500				    loff_t offset, loff_t *length)
1501{
1502	struct ceph_inode_info *ci = ceph_inode(inode);
1503	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1504	struct ceph_osd_request *req;
1505	int ret = 0;
1506	loff_t zero = 0;
1507	int op;
1508
1509	if (!length) {
1510		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1511		length = &zero;
1512	} else {
1513		op = CEPH_OSD_OP_ZERO;
1514	}
1515
1516	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1517					ceph_vino(inode),
1518					offset, length,
1519					0, 1, op,
1520					CEPH_OSD_FLAG_WRITE |
1521					CEPH_OSD_FLAG_ONDISK,
1522					NULL, 0, 0, false);
1523	if (IS_ERR(req)) {
1524		ret = PTR_ERR(req);
1525		goto out;
1526	}
1527
1528	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
1529				&inode->i_mtime);
1530
1531	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1532	if (!ret) {
1533		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1534		if (ret == -ENOENT)
1535			ret = 0;
1536	}
1537	ceph_osdc_put_request(req);
1538
1539out:
1540	return ret;
1541}
1542
1543static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1544{
1545	int ret = 0;
1546	struct ceph_inode_info *ci = ceph_inode(inode);
1547	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
1548	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
1549	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
1550	u64 object_set_size = object_size * stripe_count;
1551	u64 nearly, t;
1552
1553	/* round offset up to next period boundary */
1554	nearly = offset + object_set_size - 1;
1555	t = nearly;
1556	nearly -= do_div(t, object_set_size);
1557
1558	while (length && offset < nearly) {
1559		loff_t size = length;
1560		ret = ceph_zero_partial_object(inode, offset, &size);
1561		if (ret < 0)
1562			return ret;
1563		offset += size;
1564		length -= size;
1565	}
1566	while (length >= object_set_size) {
1567		int i;
1568		loff_t pos = offset;
1569		for (i = 0; i < stripe_count; ++i) {
1570			ret = ceph_zero_partial_object(inode, pos, NULL);
1571			if (ret < 0)
1572				return ret;
1573			pos += stripe_unit;
1574		}
1575		offset += object_set_size;
1576		length -= object_set_size;
1577	}
1578	while (length) {
1579		loff_t size = length;
1580		ret = ceph_zero_partial_object(inode, offset, &size);
1581		if (ret < 0)
1582			return ret;
1583		offset += size;
1584		length -= size;
1585	}
1586	return ret;
1587}
1588
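/*
 * Worked example for ceph_zero_objects() (hypothetical layout): with
 * stripe_unit = 1M, stripe_count = 4 and object_size = 4M, the period
 * object_set_size = 16M. Zeroing 40M from offset 8M first zeroes
 * partial objects up to the next 16M boundary, then handles two whole
 * 16M object sets by deleting/truncating their objects, leaving no
 * partial tail in this case.
 */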
1589static long ceph_fallocate(struct file *file, int mode,
1590				loff_t offset, loff_t length)
1591{
1592	struct ceph_file_info *fi = file->private_data;
1593	struct inode *inode = file_inode(file);
1594	struct ceph_inode_info *ci = ceph_inode(inode);
1595	struct ceph_osd_client *osdc =
1596		&ceph_inode_to_client(inode)->client->osdc;
1597	struct ceph_cap_flush *prealloc_cf;
1598	int want, got = 0;
1599	int dirty;
1600	int ret = 0;
1601	loff_t endoff = 0;
1602	loff_t size;
1603
1604	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1605		return -EOPNOTSUPP;
1606
1607	if (!S_ISREG(inode->i_mode))
1608		return -EOPNOTSUPP;
1609
1610	prealloc_cf = ceph_alloc_cap_flush();
1611	if (!prealloc_cf)
1612		return -ENOMEM;
1613
1614	inode_lock(inode);
1615
1616	if (ceph_snap(inode) != CEPH_NOSNAP) {
1617		ret = -EROFS;
1618		goto unlock;
1619	}
1620
1621	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
1622		!(mode & FALLOC_FL_PUNCH_HOLE)) {
1623		ret = -ENOSPC;
1624		goto unlock;
1625	}
1626
1627	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1628		ret = ceph_uninline_data(file, NULL);
1629		if (ret < 0)
1630			goto unlock;
1631	}
1632
1633	size = i_size_read(inode);
1634	if (!(mode & FALLOC_FL_KEEP_SIZE))
1635		endoff = offset + length;
1636
1637	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1638		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1639	else
1640		want = CEPH_CAP_FILE_BUFFER;
1641
1642	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1643	if (ret < 0)
1644		goto unlock;
1645
1646	if (mode & FALLOC_FL_PUNCH_HOLE) {
1647		if (offset < size)
1648			ceph_zero_pagecache_range(inode, offset, length);
1649		ret = ceph_zero_objects(inode, offset, length);
1650	} else if (endoff > size) {
1651		truncate_pagecache_range(inode, size, -1);
1652		if (ceph_inode_set_size(inode, endoff))
1653			ceph_check_caps(ceph_inode(inode),
1654				CHECK_CAPS_AUTHONLY, NULL);
1655	}
1656
1657	if (!ret) {
1658		spin_lock(&ci->i_ceph_lock);
1659		ci->i_inline_version = CEPH_INLINE_NONE;
1660		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1661					       &prealloc_cf);
1662		spin_unlock(&ci->i_ceph_lock);
1663		if (dirty)
1664			__mark_inode_dirty(inode, dirty);
1665	}
1666
1667	ceph_put_cap_refs(ci, got);
1668unlock:
1669	inode_unlock(inode);
1670	ceph_free_cap_flush(prealloc_cf);
1671	return ret;
1672}
1673
1674const struct file_operations ceph_file_fops = {
1675	.open = ceph_open,
1676	.release = ceph_release,
1677	.llseek = ceph_llseek,
1678	.read_iter = ceph_read_iter,
1679	.write_iter = ceph_write_iter,
1680	.mmap = ceph_mmap,
1681	.fsync = ceph_fsync,
1682	.lock = ceph_lock,
1683	.flock = ceph_flock,
1684	.splice_read = generic_file_splice_read,
1685	.splice_write = iter_file_splice_write,
1686	.unlocked_ioctl = ceph_ioctl,
1687	.compat_ioctl	= ceph_ioctl,
1688	.fallocate	= ceph_fallocate,
1689};
1690
fs/ceph/file.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ceph/ceph_debug.h>
   3#include <linux/ceph/striper.h>
   4
   5#include <linux/module.h>
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/file.h>
   9#include <linux/mount.h>
  10#include <linux/namei.h>
  11#include <linux/writeback.h>
  12#include <linux/falloc.h>
  13#include <linux/iversion.h>
  14#include <linux/ktime.h>
  15
  16#include "super.h"
  17#include "mds_client.h"
  18#include "cache.h"
  19#include "io.h"
  20#include "metric.h"
  21
  22static __le32 ceph_flags_sys2wire(u32 flags)
  23{
  24	u32 wire_flags = 0;
  25
  26	switch (flags & O_ACCMODE) {
  27	case O_RDONLY:
  28		wire_flags |= CEPH_O_RDONLY;
  29		break;
  30	case O_WRONLY:
  31		wire_flags |= CEPH_O_WRONLY;
  32		break;
  33	case O_RDWR:
  34		wire_flags |= CEPH_O_RDWR;
  35		break;
  36	}
  37
  38	flags &= ~O_ACCMODE;
  39
  40#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
  41
  42	ceph_sys2wire(O_CREAT);
  43	ceph_sys2wire(O_EXCL);
  44	ceph_sys2wire(O_TRUNC);
  45	ceph_sys2wire(O_DIRECTORY);
  46	ceph_sys2wire(O_NOFOLLOW);
  47
  48#undef ceph_sys2wire
  49
  50	if (flags)
  51		dout("unused open flags: %x\n", flags);
  52
  53	return cpu_to_le32(wire_flags);
  54}
  55
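/*
 * Example (descriptive only): ceph_flags_sys2wire(O_RDWR | O_CREAT)
 * returns cpu_to_le32(CEPH_O_RDWR | CEPH_O_CREAT); flags with no wire
 * equivalent are dropped and reported via the dout() above.
 */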
  56/*
  57 * Ceph file operations
  58 *
  59 * Implement basic open/close functionality, and implement
  60 * read/write.
  61 *
  62 * We implement three modes of file I/O:
  63 *  - buffered uses the generic_file_aio_{read,write} helpers
  64 *
  65 *  - synchronous is used when there is multi-client read/write
  66 *    sharing, avoids the page cache, and synchronously waits for an
  67 *    ack from the OSD.
  68 *
  69 *  - direct io takes the variant of the sync path that references
  70 *    user pages directly.
  71 *
  72 * fsync() flushes and waits on dirty pages, but just queues metadata
  73 * for writeback: since the MDS can recover size and mtime there is no
  74 * need to wait for MDS acknowledgement.
  75 */
  76
  77/*
  78 * How many pages to get in one call to iov_iter_get_pages().  This
  79 * determines the size of the on-stack array used as a buffer.
  80 */
  81#define ITER_GET_BVECS_PAGES	64
  82
  83static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
  84				struct bio_vec *bvecs)
  85{
  86	size_t size = 0;
  87	int bvec_idx = 0;
  88
  89	if (maxsize > iov_iter_count(iter))
  90		maxsize = iov_iter_count(iter);
  91
  92	while (size < maxsize) {
  93		struct page *pages[ITER_GET_BVECS_PAGES];
  94		ssize_t bytes;
  95		size_t start;
  96		int idx = 0;
  97
  98		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
  99					   ITER_GET_BVECS_PAGES, &start);
 100		if (bytes < 0)
 101			return size ?: bytes;
 102
 103		iov_iter_advance(iter, bytes);
 104		size += bytes;
 105
 106		for ( ; bytes; idx++, bvec_idx++) {
 107			struct bio_vec bv = {
 108				.bv_page = pages[idx],
 109				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
 110				.bv_offset = start,
 111			};
 112
 113			bvecs[bvec_idx] = bv;
 114			bytes -= bv.bv_len;
 115			start = 0;
 116		}
 117	}
 118
 119	return size;
 120}
 121
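/*
 * Worked example for __iter_get_bvecs() (hypothetical iter, 4K pages):
 * a single 6000-byte segment whose base sits 3000 bytes into a page
 * pins three pages and emits
 *   { page0, .bv_offset = 3000, .bv_len = 1096 }
 *   { page1, .bv_offset = 0,    .bv_len = 4096 }
 *   { page2, .bv_offset = 0,    .bv_len =  808 }
 */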
 122/*
 123 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 124 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 125 * page.
 126 *
 127 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 128 * Return the number of bytes in the created bio_vec array, or an error.
 129 */
 130static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 131				    struct bio_vec **bvecs, int *num_bvecs)
 132{
 133	struct bio_vec *bv;
 134	size_t orig_count = iov_iter_count(iter);
 135	ssize_t bytes;
 136	int npages;
 137
 138	iov_iter_truncate(iter, maxsize);
 139	npages = iov_iter_npages(iter, INT_MAX);
 140	iov_iter_reexpand(iter, orig_count);
 141
 142	/*
 143	 * __iter_get_bvecs() may populate only part of the array -- zero it
 144	 * out.
 145	 */
 146	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
 147	if (!bv)
 148		return -ENOMEM;
 149
 150	bytes = __iter_get_bvecs(iter, maxsize, bv);
 151	if (bytes < 0) {
 152		/*
 153		 * No pages were pinned -- just free the array.
 154		 */
 155		kvfree(bv);
 156		return bytes;
 157	}
 158
 159	*bvecs = bv;
 160	*num_bvecs = npages;
 161	return bytes;
 162}
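/*
 * Note on the contract (a reading of the code above, not separate
 * documentation): *num_bvecs is the upper bound from iov_iter_npages(),
 * not the count of entries actually filled.  The __GFP_ZERO allocation
 * leaves unused tail entries with a NULL bv_page, which put_bvecs()
 * below checks before dirtying and releasing pages.
 */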
 163
 164static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 165{
 166	int i;
 167
 168	for (i = 0; i < num_bvecs; i++) {
 169		if (bvecs[i].bv_page) {
 170			if (should_dirty)
 171				set_page_dirty_lock(bvecs[i].bv_page);
 172			put_page(bvecs[i].bv_page);
 173		}
 174	}
 175	kvfree(bvecs);
 176}
 177
 178/*
 179 * Prepare an open request.  Preallocate ceph_cap to avoid an
 180 * inopportune ENOMEM later.
 181 */
 182static struct ceph_mds_request *
 183prepare_open_request(struct super_block *sb, int flags, int create_mode)
 184{
 185	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 186	struct ceph_mds_client *mdsc = fsc->mdsc;
 187	struct ceph_mds_request *req;
 188	int want_auth = USE_ANY_MDS;
 189	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 190
 191	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 192		want_auth = USE_AUTH_MDS;
 193
 194	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 195	if (IS_ERR(req))
 196		goto out;
 197	req->r_fmode = ceph_flags_to_mode(flags);
 198	req->r_args.open.flags = ceph_flags_sys2wire(flags);
 199	req->r_args.open.mode = cpu_to_le32(create_mode);
 200out:
 201	return req;
 202}
 203
 204static int ceph_init_file_info(struct inode *inode, struct file *file,
 205					int fmode, bool isdir)
 206{
 207	struct ceph_inode_info *ci = ceph_inode(inode);
 208	struct ceph_file_info *fi;
 209
 210	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
 211			inode->i_mode, isdir ? "dir" : "regular");
 212	BUG_ON(inode->i_fop->release != ceph_release);
 213
 214	if (isdir) {
 215		struct ceph_dir_file_info *dfi =
 216			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 217		if (!dfi)
 218			return -ENOMEM;
 219
 220		file->private_data = dfi;
 221		fi = &dfi->file_info;
 222		dfi->next_offset = 2;
 223		dfi->readdir_cache_idx = -1;
 224	} else {
 225		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 226		if (!fi)
 227			return -ENOMEM;
 228
 229		file->private_data = fi;
 230	}
 231
 232	ceph_get_fmode(ci, fmode, 1);
 233	fi->fmode = fmode;
 234
 235	spin_lock_init(&fi->rw_contexts_lock);
 236	INIT_LIST_HEAD(&fi->rw_contexts);
 237	fi->meta_err = errseq_sample(&ci->i_meta_err);
 238	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
 239
 240	return 0;
 241}
 242
 243/*
 244 * initialize private struct file data.
 245 * if we fail, clean up by dropping fmode reference on the ceph_inode
 246 */
 247static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 248{
 249	int ret = 0;
 250
 251	switch (inode->i_mode & S_IFMT) {
 252	case S_IFREG:
 253		ceph_fscache_register_inode_cookie(inode);
 254		ceph_fscache_file_set_cookie(inode, file);
 255		fallthrough;
 256	case S_IFDIR:
 257		ret = ceph_init_file_info(inode, file, fmode,
 258						S_ISDIR(inode->i_mode));
 259		if (ret)
 260			return ret;
 261		break;
 262
 263	case S_IFLNK:
 264		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 265		     inode->i_mode);
 266		break;
 267
 268	default:
 269		dout("init_file %p %p 0%o (special)\n", inode, file,
 270		     inode->i_mode);
 271		/*
 272		 * we need to drop the open ref now, since we don't
 273		 * have .release set to ceph_release.
 274		 */
 275		BUG_ON(inode->i_fop->release == ceph_release);
 276
 277		/* call the proper open fop */
 278		ret = inode->i_fop->open(inode, file);
 279	}
 280	return ret;
 281}
 282
 283/*
 284 * try renew caps after session gets killed.
 285 */
 286int ceph_renew_caps(struct inode *inode, int fmode)
 287{
 288	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 289	struct ceph_inode_info *ci = ceph_inode(inode);
 290	struct ceph_mds_request *req;
 291	int err, flags, wanted;
 292
 293	spin_lock(&ci->i_ceph_lock);
 294	__ceph_touch_fmode(ci, mdsc, fmode);
 295	wanted = __ceph_caps_file_wanted(ci);
 296	if (__ceph_is_any_real_caps(ci) &&
 297	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 298		int issued = __ceph_caps_issued(ci, NULL);
 299		spin_unlock(&ci->i_ceph_lock);
 300		dout("renew caps %p want %s issued %s updating mds_wanted\n",
 301		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 302		ceph_check_caps(ci, 0, NULL);
 303		return 0;
 304	}
 305	spin_unlock(&ci->i_ceph_lock);
 306
 307	flags = 0;
 308	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 309		flags = O_RDWR;
 310	else if (wanted & CEPH_CAP_FILE_RD)
 311		flags = O_RDONLY;
 312	else if (wanted & CEPH_CAP_FILE_WR)
 313		flags = O_WRONLY;
 314#ifdef O_LAZY
 315	if (wanted & CEPH_CAP_FILE_LAZYIO)
 316		flags |= O_LAZY;
 317#endif
 318
 319	req = prepare_open_request(inode->i_sb, flags, 0);
 320	if (IS_ERR(req)) {
 321		err = PTR_ERR(req);
 322		goto out;
 323	}
 324
 325	req->r_inode = inode;
 326	ihold(inode);
 327	req->r_num_caps = 1;
 328
 329	err = ceph_mdsc_do_request(mdsc, NULL, req);
 330	ceph_mdsc_put_request(req);
 331out:
 332	dout("renew caps %p open result=%d\n", inode, err);
 333	return err < 0 ? err : 0;
 334}
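/*
 * The cap-to-flag mapping above, by example: wanted = Fr|Fw reopens
 * with O_RDWR, Fr alone with O_RDONLY.  A wanted set with neither read
 * nor write bits leaves flags = 0, which is O_RDONLY on Linux.
 */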
 335
 336/*
 337 * If we already have the requisite capabilities, we can satisfy
 338 * the open request locally (no need to request new caps from the
 339 * MDS).  We do, however, need to inform the MDS (asynchronously)
 340 * if our wanted caps set expands.
 341 */
 342int ceph_open(struct inode *inode, struct file *file)
 343{
 344	struct ceph_inode_info *ci = ceph_inode(inode);
 345	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 346	struct ceph_mds_client *mdsc = fsc->mdsc;
 347	struct ceph_mds_request *req;
 348	struct ceph_file_info *fi = file->private_data;
 349	int err;
 350	int flags, fmode, wanted;
 351
 352	if (fi) {
 353		dout("open file %p is already opened\n", file);
 354		return 0;
 355	}
 356
 357	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 358	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 359	if (S_ISDIR(inode->i_mode))
 360		flags = O_DIRECTORY;  /* mds likes to know */
 361
 362	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 363	     ceph_vinop(inode), file, flags, file->f_flags);
 364	fmode = ceph_flags_to_mode(flags);
 365	wanted = ceph_caps_for_mode(fmode);
 366
 367	/* snapped files are read-only */
 368	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 369		return -EROFS;
 370
 371	/* trivially open snapdir */
 372	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 373		return ceph_init_file(inode, file, fmode);
 374	}
 375
 376	/*
 377	 * No need to block if we have caps on the auth MDS (for
 378	 * write) or any MDS (for read).  Update wanted set
 379	 * asynchronously.
 380	 */
 381	spin_lock(&ci->i_ceph_lock);
 382	if (__ceph_is_any_real_caps(ci) &&
 383	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 384		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
 385		int issued = __ceph_caps_issued(ci, NULL);
 386
 387		dout("open %p fmode %d want %s issued %s using existing\n",
 388		     inode, fmode, ceph_cap_string(wanted),
 389		     ceph_cap_string(issued));
 390		__ceph_touch_fmode(ci, mdsc, fmode);
 391		spin_unlock(&ci->i_ceph_lock);
 392
 393		/* adjust wanted? */
 394		if ((issued & wanted) != wanted &&
 395		    (mds_wanted & wanted) != wanted &&
 396		    ceph_snap(inode) != CEPH_SNAPDIR)
 397			ceph_check_caps(ci, 0, NULL);
 398
 399		return ceph_init_file(inode, file, fmode);
 400	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 401		   (ci->i_snap_caps & wanted) == wanted) {
 402		__ceph_touch_fmode(ci, mdsc, fmode);
 403		spin_unlock(&ci->i_ceph_lock);
 404		return ceph_init_file(inode, file, fmode);
 405	}
 406
 407	spin_unlock(&ci->i_ceph_lock);
 408
 409	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 410	req = prepare_open_request(inode->i_sb, flags, 0);
 411	if (IS_ERR(req)) {
 412		err = PTR_ERR(req);
 413		goto out;
 414	}
 415	req->r_inode = inode;
 416	ihold(inode);
 417
 418	req->r_num_caps = 1;
 419	err = ceph_mdsc_do_request(mdsc, NULL, req);
 420	if (!err)
 421		err = ceph_init_file(inode, file, req->r_fmode);
 422	ceph_mdsc_put_request(req);
 423	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 424out:
 425	return err;
 426}
 427
 428/* Clone the layout from a synchronous create, if the dir now has Dc caps */
 429static void
 430cache_file_layout(struct inode *dst, struct inode *src)
 431{
 432	struct ceph_inode_info *cdst = ceph_inode(dst);
 433	struct ceph_inode_info *csrc = ceph_inode(src);
 434
 435	spin_lock(&cdst->i_ceph_lock);
 436	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
 437	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
 438		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
 439			sizeof(cdst->i_cached_layout));
 440		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
 441				   ceph_try_get_string(csrc->i_layout.pool_ns));
 442	}
 443	spin_unlock(&cdst->i_ceph_lock);
 444}
 445
 446/*
  447 * Try to set up an async create. We need caps, a file layout, and an
  448 * inode number, and either a lease on the dentry or complete dir info.
  449 * If any of those criteria are not satisfied, return 0 and the caller
  450 * can fall back to a synchronous create.
 451 */
 452static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
 453				 struct ceph_file_layout *lo, u64 *pino)
 454{
 455	struct ceph_inode_info *ci = ceph_inode(dir);
 456	struct ceph_dentry_info *di = ceph_dentry(dentry);
 457	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
 458	u64 ino;
 459
 460	spin_lock(&ci->i_ceph_lock);
 461	/* No auth cap means no chance for Dc caps */
 462	if (!ci->i_auth_cap)
 463		goto no_async;
 464
 465	/* Any delegated inos? */
 466	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
 467		goto no_async;
 468
 469	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
 470		goto no_async;
 471
 472	if ((__ceph_caps_issued(ci, NULL) & want) != want)
 473		goto no_async;
 474
 475	if (d_in_lookup(dentry)) {
 476		if (!__ceph_dir_is_complete(ci))
 477			goto no_async;
 478		spin_lock(&dentry->d_lock);
 479		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
 480		spin_unlock(&dentry->d_lock);
 481	} else if (atomic_read(&ci->i_shared_gen) !=
 482		   READ_ONCE(di->lease_shared_gen)) {
 483		goto no_async;
 484	}
 485
 486	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
 487	if (!ino)
 488		goto no_async;
 489
 490	*pino = ino;
 491	ceph_take_cap_refs(ci, want, false);
 492	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
 493	rcu_assign_pointer(lo->pool_ns,
 494			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
 495	got = want;
 496no_async:
 497	spin_unlock(&ci->i_ceph_lock);
 498	return got;
 499}
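/*
 * Sketch of the caller's side (see ceph_atomic_open() below): a
 * non-zero return is the cap mask (CEPH_CAP_FILE_EXCL|CEPH_CAP_DIR_CREATE)
 * whose references were taken here; the caller stashes it in
 * req->r_dir_caps and it is dropped via ceph_mdsc_release_dir_caps().
 * Zero means "fall back to a synchronous create".
 */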
 500
 501static void restore_deleg_ino(struct inode *dir, u64 ino)
 502{
 503	struct ceph_inode_info *ci = ceph_inode(dir);
 504	struct ceph_mds_session *s = NULL;
 505
 506	spin_lock(&ci->i_ceph_lock);
 507	if (ci->i_auth_cap)
 508		s = ceph_get_mds_session(ci->i_auth_cap->session);
 509	spin_unlock(&ci->i_ceph_lock);
 510	if (s) {
 511		int err = ceph_restore_deleg_ino(s, ino);
 512		if (err)
 513			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
 514				ino, err);
 515		ceph_put_mds_session(s);
 516	}
 517}
 518
 519static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
 520                                 struct ceph_mds_request *req)
 521{
 522	int result = req->r_err ? req->r_err :
 523			le32_to_cpu(req->r_reply_info.head->result);
 524
 525	if (result == -EJUKEBOX)
 526		goto out;
 527
 528	mapping_set_error(req->r_parent->i_mapping, result);
 529
 530	if (result) {
 531		struct dentry *dentry = req->r_dentry;
 532		int pathlen = 0;
 533		u64 base = 0;
 534		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
 535						  &base, 0);
 536
 537		ceph_dir_clear_complete(req->r_parent);
 538		if (!d_unhashed(dentry))
 539			d_drop(dentry);
 540
 541		/* FIXME: start returning I/O errors on all accesses? */
 542		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
 543			base, IS_ERR(path) ? "<<bad>>" : path, result);
 544		ceph_mdsc_free_path(path, pathlen);
 545	}
 546
 547	if (req->r_target_inode) {
 548		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
 549		u64 ino = ceph_vino(req->r_target_inode).ino;
 550
 551		if (req->r_deleg_ino != ino)
 552			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
 553				__func__, req->r_err, req->r_deleg_ino, ino);
 554		mapping_set_error(req->r_target_inode->i_mapping, result);
 555
 556		spin_lock(&ci->i_ceph_lock);
 557		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
 558			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
 559			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
 560		}
 561		ceph_kick_flushing_inode_caps(req->r_session, ci);
 562		spin_unlock(&ci->i_ceph_lock);
 563	} else {
 564		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
 565			req->r_deleg_ino);
 566	}
 567out:
 568	ceph_mdsc_release_dir_caps(req);
 569}
 570
 571static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
 572				    struct file *file, umode_t mode,
 573				    struct ceph_mds_request *req,
 574				    struct ceph_acl_sec_ctx *as_ctx,
 575				    struct ceph_file_layout *lo)
 576{
 577	int ret;
 578	char xattr_buf[4];
 579	struct ceph_mds_reply_inode in = { };
 580	struct ceph_mds_reply_info_in iinfo = { .in = &in };
 581	struct ceph_inode_info *ci = ceph_inode(dir);
 582	struct inode *inode;
 583	struct timespec64 now;
 584	struct ceph_vino vino = { .ino = req->r_deleg_ino,
 585				  .snap = CEPH_NOSNAP };
 586
 587	ktime_get_real_ts64(&now);
 588
 589	inode = ceph_get_inode(dentry->d_sb, vino);
 590	if (IS_ERR(inode))
 591		return PTR_ERR(inode);
 592
 593	iinfo.inline_version = CEPH_INLINE_NONE;
 594	iinfo.change_attr = 1;
 595	ceph_encode_timespec64(&iinfo.btime, &now);
 596
 597	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
 598	iinfo.xattr_data = xattr_buf;
 599	memset(iinfo.xattr_data, 0, iinfo.xattr_len);
 600
 601	in.ino = cpu_to_le64(vino.ino);
 602	in.snapid = cpu_to_le64(CEPH_NOSNAP);
 603	in.version = cpu_to_le64(1);	// ???
 604	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
 605	in.cap.cap_id = cpu_to_le64(1);
 606	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
 607	in.cap.flags = CEPH_CAP_FLAG_AUTH;
 608	in.ctime = in.mtime = in.atime = iinfo.btime;
 609	in.mode = cpu_to_le32((u32)mode);
 610	in.truncate_seq = cpu_to_le32(1);
 611	in.truncate_size = cpu_to_le64(-1ULL);
 612	in.xattr_version = cpu_to_le64(1);
 613	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
 614	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
 615				dir->i_gid : current_fsgid()));
 616	in.nlink = cpu_to_le32(1);
 617	in.max_size = cpu_to_le64(lo->stripe_unit);
 618
 619	ceph_file_layout_to_legacy(lo, &in.layout);
 620
 621	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
 622			      req->r_fmode, NULL);
 623	if (ret) {
 624		dout("%s failed to fill inode: %d\n", __func__, ret);
 625		ceph_dir_clear_complete(dir);
 626		if (!d_unhashed(dentry))
 627			d_drop(dentry);
 628		if (inode->i_state & I_NEW)
 629			discard_new_inode(inode);
 630	} else {
 631		struct dentry *dn;
 632
 633		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
 634			vino.ino, ceph_ino(dir), dentry->d_name.name);
 635		ceph_dir_clear_ordered(dir);
 636		ceph_init_inode_acls(inode, as_ctx);
 637		if (inode->i_state & I_NEW) {
 638			/*
 639			 * If it's not I_NEW, then someone created this before
 640			 * we got here. Assume the server is aware of it at
 641			 * that point and don't worry about setting
 642			 * CEPH_I_ASYNC_CREATE.
 643			 */
 644			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
 645			unlock_new_inode(inode);
 646		}
 647		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
 648			if (!d_unhashed(dentry))
 649				d_drop(dentry);
 650			dn = d_splice_alias(inode, dentry);
 651			WARN_ON_ONCE(dn && dn != dentry);
 652		}
 653		file->f_mode |= FMODE_CREATED;
 654		ret = finish_open(file, dentry, ceph_open);
 655	}
 656	return ret;
 657}
 658
 659/*
 660 * Do a lookup + open with a single request.  If we get a non-existent
 661 * file or symlink, return 1 so the VFS can retry.
 662 */
 663int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 664		     struct file *file, unsigned flags, umode_t mode)
 665{
 666	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 667	struct ceph_mds_client *mdsc = fsc->mdsc;
 668	struct ceph_mds_request *req;
 669	struct dentry *dn;
 670	struct ceph_acl_sec_ctx as_ctx = {};
 671	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
 672	int mask;
 673	int err;
 674
 675	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 676	     dir, dentry, dentry,
 677	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 678
 679	if (dentry->d_name.len > NAME_MAX)
 680		return -ENAMETOOLONG;
 681
 682	if (flags & O_CREAT) {
 683		if (ceph_quota_is_max_files_exceeded(dir))
 684			return -EDQUOT;
 685		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
 686		if (err < 0)
 687			return err;
 688		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
 689		if (err < 0)
 690			goto out_ctx;
 691	} else if (!d_in_lookup(dentry)) {
 692		/* If it's not being looked up, it's negative */
 693		return -ENOENT;
 694	}
 695retry:
 696	/* do the open */
 697	req = prepare_open_request(dir->i_sb, flags, mode);
 698	if (IS_ERR(req)) {
 699		err = PTR_ERR(req);
 700		goto out_ctx;
 701	}
 702	req->r_dentry = dget(dentry);
 703	req->r_num_caps = 2;
 704	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
 705	if (ceph_security_xattr_wanted(dir))
 706		mask |= CEPH_CAP_XATTR_SHARED;
 707	req->r_args.open.mask = cpu_to_le32(mask);
 708	req->r_parent = dir;
 709
 710	if (flags & O_CREAT) {
 711		struct ceph_file_layout lo;
 712
 713		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
 714		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 715		if (as_ctx.pagelist) {
 716			req->r_pagelist = as_ctx.pagelist;
 717			as_ctx.pagelist = NULL;
 718		}
 719		if (try_async &&
 720		    (req->r_dir_caps =
 721		      try_prep_async_create(dir, dentry, &lo,
 722					    &req->r_deleg_ino))) {
 723			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
 724			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
 725			req->r_callback = ceph_async_create_cb;
 726			err = ceph_mdsc_submit_request(mdsc, dir, req);
 727			if (!err) {
 728				err = ceph_finish_async_create(dir, dentry,
 729							file, mode, req,
 730							&as_ctx, &lo);
 731			} else if (err == -EJUKEBOX) {
 732				restore_deleg_ino(dir, req->r_deleg_ino);
 733				ceph_mdsc_put_request(req);
 734				try_async = false;
 735				goto retry;
 736			}
 737			goto out_req;
 738		}
 739	}
 740
 741	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 742	err = ceph_mdsc_do_request(mdsc,
 743				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 744				   req);
 745	err = ceph_handle_snapdir(req, dentry, err);
 746	if (err)
 747		goto out_req;
 748
 749	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 750		err = ceph_handle_notrace_create(dir, dentry);
 751
 752	if (d_in_lookup(dentry)) {
 753		dn = ceph_finish_lookup(req, dentry, err);
 754		if (IS_ERR(dn))
 755			err = PTR_ERR(dn);
 756	} else {
 757		/* we were given a hashed negative dentry */
 758		dn = NULL;
 759	}
 760	if (err)
 761		goto out_req;
 762	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 763		/* make vfs retry on splice, ENOENT, or symlink */
 764		dout("atomic_open finish_no_open on dn %p\n", dn);
 765		err = finish_no_open(file, dn);
 766	} else {
 767		dout("atomic_open finish_open on dn %p\n", dn);
 768		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 769			struct inode *newino = d_inode(dentry);
 770
 771			cache_file_layout(dir, newino);
 772			ceph_init_inode_acls(newino, &as_ctx);
 773			file->f_mode |= FMODE_CREATED;
 774		}
 775		err = finish_open(file, dentry, ceph_open);
 776	}
 777out_req:
 778	ceph_mdsc_put_request(req);
 779out_ctx:
 780	ceph_release_acl_sec_ctx(&as_ctx);
 781	dout("atomic_open result=%d\n", err);
 782	return err;
 783}
 784
 785int ceph_release(struct inode *inode, struct file *file)
 786{
 787	struct ceph_inode_info *ci = ceph_inode(inode);
 788
 789	if (S_ISDIR(inode->i_mode)) {
 790		struct ceph_dir_file_info *dfi = file->private_data;
 791		dout("release inode %p dir file %p\n", inode, file);
 792		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 793
 794		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
 795
 796		if (dfi->last_readdir)
 797			ceph_mdsc_put_request(dfi->last_readdir);
 798		kfree(dfi->last_name);
 799		kfree(dfi->dir_info);
 800		kmem_cache_free(ceph_dir_file_cachep, dfi);
 801	} else {
 802		struct ceph_file_info *fi = file->private_data;
 803		dout("release inode %p regular file %p\n", inode, file);
 804		WARN_ON(!list_empty(&fi->rw_contexts));
 805
 806		ceph_put_fmode(ci, fi->fmode, 1);
 807
 808		kmem_cache_free(ceph_file_cachep, fi);
 809	}
 810
 811	/* wake up anyone waiting for caps on this inode */
 812	wake_up_all(&ci->i_cap_wq);
 813	return 0;
 814}
 815
 816enum {
 817	HAVE_RETRIED = 1,
 818	CHECK_EOF =    2,
 819	READ_INLINE =  3,
 820};
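/*
 * How ceph_read_iter() below uses these retry_op values: HAVE_RETRIED
 * marks a read that already looped once, CHECK_EOF requests an i_size
 * revalidation after a short read, and READ_INLINE diverts the read to
 * the inline data fetched from the MDS.
 */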
 821
 822/*
 823 * Completely synchronous read and write methods.  Direct from __user
 824 * buffer to osd, or directly to user pages (if O_DIRECT).
 825 *
 826 * If the read spans object boundary, just do multiple reads.  (That's not
 827 * atomic, but good enough for now.)
 828 *
 829 * If we get a short result from the OSD, check against i_size; we need to
 830 * only return a short read to the caller if we hit EOF.
 831 */
 832static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 833			      int *retry_op)
 834{
 835	struct file *file = iocb->ki_filp;
 836	struct inode *inode = file_inode(file);
 837	struct ceph_inode_info *ci = ceph_inode(inode);
 838	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 839	struct ceph_osd_client *osdc = &fsc->client->osdc;
 840	ssize_t ret;
 841	u64 off = iocb->ki_pos;
 842	u64 len = iov_iter_count(to);
 843
 844	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
 845	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 846
 847	if (!len)
 848		return 0;
 849	/*
 850	 * flush any page cache pages in this range.  this
 851	 * will make concurrent normal and sync io slow,
 852	 * but it will at least behave sensibly when they are
 853	 * in sequence.
 854	 */
 855	ret = filemap_write_and_wait_range(inode->i_mapping,
 856					   off, off + len - 1);
 857	if (ret < 0)
 858		return ret;
 859
 860	ret = 0;
 861	while ((len = iov_iter_count(to)) > 0) {
 862		struct ceph_osd_request *req;
 863		struct page **pages;
 864		int num_pages;
 865		size_t page_off;
 866		u64 i_size;
 867		bool more;
 868
 869		req = ceph_osdc_new_request(osdc, &ci->i_layout,
 870					ci->i_vino, off, &len, 0, 1,
 871					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 872					NULL, ci->i_truncate_seq,
 873					ci->i_truncate_size, false);
 874		if (IS_ERR(req)) {
 875			ret = PTR_ERR(req);
 876			break;
 877		}
 878
 879		more = len < iov_iter_count(to);
 880
 881		if (unlikely(iov_iter_is_pipe(to))) {
 882			ret = iov_iter_get_pages_alloc(to, &pages, len,
 883						       &page_off);
 884			if (ret <= 0) {
 885				ceph_osdc_put_request(req);
  886				ret = ret < 0 ? ret : -ENOMEM;
 887				break;
 888			}
 889			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 890			if (ret < len) {
 891				len = ret;
 892				osd_req_op_extent_update(req, 0, len);
 893				more = false;
 894			}
 895		} else {
 896			num_pages = calc_pages_for(off, len);
 897			page_off = off & ~PAGE_MASK;
 898			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 899			if (IS_ERR(pages)) {
 900				ceph_osdc_put_request(req);
 901				ret = PTR_ERR(pages);
 902				break;
 903			}
 904		}
 905
 906		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
 907						 false, false);
 908		ret = ceph_osdc_start_request(osdc, req, false);
 909		if (!ret)
 910			ret = ceph_osdc_wait_request(osdc, req);
 911
 912		ceph_update_read_latency(&fsc->mdsc->metric,
 913					 req->r_start_latency,
 914					 req->r_end_latency,
 915					 ret);
 916
 917		ceph_osdc_put_request(req);
 918
 919		i_size = i_size_read(inode);
 920		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
 921		     off, len, ret, i_size, (more ? " MORE" : ""));
 922
 923		if (ret == -ENOENT)
 924			ret = 0;
 925		if (ret >= 0 && ret < len && (off + ret < i_size)) {
 926			int zlen = min(len - ret, i_size - off - ret);
 927			int zoff = page_off + ret;
 928			dout("sync_read zero gap %llu~%llu\n",
  929		     off + ret, off + ret + zlen);
 930			ceph_zero_page_vector_range(zoff, zlen, pages);
 931			ret += zlen;
 932		}
 933
 934		if (unlikely(iov_iter_is_pipe(to))) {
 935			if (ret > 0) {
 936				iov_iter_advance(to, ret);
 937				off += ret;
 938			} else {
 939				iov_iter_advance(to, 0);
 940			}
 941			ceph_put_page_vector(pages, num_pages, false);
 942		} else {
 943			int idx = 0;
 944			size_t left = ret > 0 ? ret : 0;
 945			while (left > 0) {
 946				size_t len, copied;
 947				page_off = off & ~PAGE_MASK;
 948				len = min_t(size_t, left, PAGE_SIZE - page_off);
 949				copied = copy_page_to_iter(pages[idx++],
 950							   page_off, len, to);
 951				off += copied;
 952				left -= copied;
 953				if (copied < len) {
 954					ret = -EFAULT;
 955					break;
 956				}
 957			}
 958			ceph_release_page_vector(pages, num_pages);
 959		}
 960
 961		if (ret < 0) {
 962			if (ret == -EBLACKLISTED)
 963				fsc->blacklisted = true;
 964			break;
 965		}
 966
 967		if (off >= i_size || !more)
 968			break;
 969	}
 970
 971	if (off > iocb->ki_pos) {
 972		if (ret >= 0 &&
 973		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
 974			*retry_op = CHECK_EOF;
 975		ret = off - iocb->ki_pos;
 976		iocb->ki_pos = off;
 977	}
 978
 979	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
 980	return ret;
 981}
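/*
 * The zero-fill arithmetic above, with hypothetical numbers: for off=0,
 * len=8192, i_size=6000 and the OSD returning ret=4096, the gap up to
 * EOF is below i_size, so zlen = min(8192 - 4096, 6000 - 0 - 4096) =
 * 1904 bytes are zeroed in the page vector and ret becomes 6000; bytes
 * past i_size are never reported to the caller.
 */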
 982
 983struct ceph_aio_request {
 984	struct kiocb *iocb;
 985	size_t total_len;
 986	bool write;
 987	bool should_dirty;
 988	int error;
 989	struct list_head osd_reqs;
 990	unsigned num_reqs;
 991	atomic_t pending_reqs;
 992	struct timespec64 mtime;
 993	struct ceph_cap_flush *prealloc_cf;
 994};
 995
 996struct ceph_aio_work {
 997	struct work_struct work;
 998	struct ceph_osd_request *req;
 999};
1000
1001static void ceph_aio_retry_work(struct work_struct *work);
1002
1003static void ceph_aio_complete(struct inode *inode,
1004			      struct ceph_aio_request *aio_req)
1005{
1006	struct ceph_inode_info *ci = ceph_inode(inode);
1007	int ret;
1008
1009	if (!atomic_dec_and_test(&aio_req->pending_reqs))
1010		return;
1011
1012	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1013		inode_dio_end(inode);
1014
1015	ret = aio_req->error;
1016	if (!ret)
1017		ret = aio_req->total_len;
1018
1019	dout("ceph_aio_complete %p rc %d\n", inode, ret);
1020
1021	if (ret >= 0 && aio_req->write) {
1022		int dirty;
1023
1024		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1025		if (endoff > i_size_read(inode)) {
1026			if (ceph_inode_set_size(inode, endoff))
1027				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1028		}
1029
1030		spin_lock(&ci->i_ceph_lock);
1031		ci->i_inline_version = CEPH_INLINE_NONE;
1032		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1033					       &aio_req->prealloc_cf);
1034		spin_unlock(&ci->i_ceph_lock);
1035		if (dirty)
1036			__mark_inode_dirty(inode, dirty);
1037
1038	}
1039
1040	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1041						CEPH_CAP_FILE_RD));
1042
1043	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
1044
1045	ceph_free_cap_flush(aio_req->prealloc_cf);
1046	kfree(aio_req);
1047}
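/*
 * ceph_aio_complete() runs exactly once, on the last OSD reply: each
 * sub-request bumped aio_req->pending_reqs in ceph_direct_read_write(),
 * so only the final atomic_dec_and_test() above falls through to
 * ki_complete().
 */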
1048
1049static void ceph_aio_complete_req(struct ceph_osd_request *req)
1050{
1051	int rc = req->r_result;
1052	struct inode *inode = req->r_inode;
1053	struct ceph_aio_request *aio_req = req->r_priv;
1054	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1055	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1056	struct ceph_client_metric *metric = &fsc->mdsc->metric;
1057
1058	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1059	BUG_ON(!osd_data->num_bvecs);
1060
1061	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
1062	     inode, rc, osd_data->bvec_pos.iter.bi_size);
1063
1064	/* r_start_latency == 0 means the request was not submitted */
1065	if (req->r_start_latency) {
1066		if (aio_req->write)
1067			ceph_update_write_latency(metric, req->r_start_latency,
1068						  req->r_end_latency, rc);
1069		else
1070			ceph_update_read_latency(metric, req->r_start_latency,
1071						 req->r_end_latency, rc);
1072	}
1073
1074	if (rc == -EOLDSNAPC) {
1075		struct ceph_aio_work *aio_work;
1076		BUG_ON(!aio_req->write);
1077
1078		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1079		if (aio_work) {
1080			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1081			aio_work->req = req;
1082			queue_work(ceph_inode_to_client(inode)->inode_wq,
1083				   &aio_work->work);
1084			return;
1085		}
1086		rc = -ENOMEM;
1087	} else if (!aio_req->write) {
1088		if (rc == -ENOENT)
1089			rc = 0;
1090		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
1091			struct iov_iter i;
1092			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
1093
1094			/*
 1095			 * If the read is satisfied by a single OSD
 1096			 * request, it can extend past EOF.  Otherwise
 1097			 * the read is within i_size.
1098			 */
1099			if (aio_req->num_reqs == 1) {
1100				loff_t i_size = i_size_read(inode);
1101				loff_t endoff = aio_req->iocb->ki_pos + rc;
1102				if (endoff < i_size)
1103					zlen = min_t(size_t, zlen,
1104						     i_size - endoff);
1105				aio_req->total_len = rc + zlen;
1106			}
1107
1108			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
1109				      osd_data->num_bvecs,
1110				      osd_data->bvec_pos.iter.bi_size);
1111			iov_iter_advance(&i, rc);
1112			iov_iter_zero(zlen, &i);
1113		}
1114	}
1115
1116	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1117		  aio_req->should_dirty);
1118	ceph_osdc_put_request(req);
1119
1120	if (rc < 0)
1121		cmpxchg(&aio_req->error, 0, rc);
1122
1123	ceph_aio_complete(inode, aio_req);
1124	return;
1125}
1126
1127static void ceph_aio_retry_work(struct work_struct *work)
1128{
1129	struct ceph_aio_work *aio_work =
1130		container_of(work, struct ceph_aio_work, work);
1131	struct ceph_osd_request *orig_req = aio_work->req;
1132	struct ceph_aio_request *aio_req = orig_req->r_priv;
1133	struct inode *inode = orig_req->r_inode;
1134	struct ceph_inode_info *ci = ceph_inode(inode);
1135	struct ceph_snap_context *snapc;
1136	struct ceph_osd_request *req;
1137	int ret;
1138
1139	spin_lock(&ci->i_ceph_lock);
1140	if (__ceph_have_pending_cap_snap(ci)) {
1141		struct ceph_cap_snap *capsnap =
1142			list_last_entry(&ci->i_cap_snaps,
1143					struct ceph_cap_snap,
1144					ci_item);
1145		snapc = ceph_get_snap_context(capsnap->context);
1146	} else {
1147		BUG_ON(!ci->i_head_snapc);
1148		snapc = ceph_get_snap_context(ci->i_head_snapc);
1149	}
1150	spin_unlock(&ci->i_ceph_lock);
1151
1152	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1153			false, GFP_NOFS);
1154	if (!req) {
1155		ret = -ENOMEM;
1156		req = orig_req;
1157		goto out;
1158	}
1159
1160	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1161	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1162	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1163
1164	req->r_ops[0] = orig_req->r_ops[0];
1165
1166	req->r_mtime = aio_req->mtime;
1167	req->r_data_offset = req->r_ops[0].extent.offset;
1168
1169	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1170	if (ret) {
1171		ceph_osdc_put_request(req);
1172		req = orig_req;
1173		goto out;
1174	}
1175
1176	ceph_osdc_put_request(orig_req);
1177
1178	req->r_callback = ceph_aio_complete_req;
1179	req->r_inode = inode;
1180	req->r_priv = aio_req;
1181
1182	ret = ceph_osdc_start_request(req->r_osdc, req, false);
1183out:
1184	if (ret < 0) {
1185		req->r_result = ret;
1186		ceph_aio_complete_req(req);
1187	}
1188
1189	ceph_put_snap_context(snapc);
1190	kfree(aio_work);
1191}
1192
1193static ssize_t
1194ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1195		       struct ceph_snap_context *snapc,
1196		       struct ceph_cap_flush **pcf)
1197{
1198	struct file *file = iocb->ki_filp;
1199	struct inode *inode = file_inode(file);
1200	struct ceph_inode_info *ci = ceph_inode(inode);
1201	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1202	struct ceph_client_metric *metric = &fsc->mdsc->metric;
1203	struct ceph_vino vino;
1204	struct ceph_osd_request *req;
1205	struct bio_vec *bvecs;
1206	struct ceph_aio_request *aio_req = NULL;
1207	int num_pages = 0;
1208	int flags;
1209	int ret = 0;
1210	struct timespec64 mtime = current_time(inode);
1211	size_t count = iov_iter_count(iter);
1212	loff_t pos = iocb->ki_pos;
1213	bool write = iov_iter_rw(iter) == WRITE;
1214	bool should_dirty = !write && iter_is_iovec(iter);
1215
1216	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1217		return -EROFS;
1218
1219	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1220	     (write ? "write" : "read"), file, pos, (unsigned)count,
1221	     snapc, snapc ? snapc->seq : 0);
1222
1223	if (write) {
1224		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1225					pos >> PAGE_SHIFT,
1226					(pos + count - 1) >> PAGE_SHIFT);
1227		if (ret2 < 0)
1228			dout("invalidate_inode_pages2_range returned %d\n", ret2);
1229
1230		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1231	} else {
1232		flags = CEPH_OSD_FLAG_READ;
1233	}
1234
1235	while (iov_iter_count(iter) > 0) {
1236		u64 size = iov_iter_count(iter);
1237		ssize_t len;
1238
1239		if (write)
1240			size = min_t(u64, size, fsc->mount_options->wsize);
1241		else
1242			size = min_t(u64, size, fsc->mount_options->rsize);
1243
1244		vino = ceph_vino(inode);
1245		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1246					    vino, pos, &size, 0,
1247					    1,
1248					    write ? CEPH_OSD_OP_WRITE :
1249						    CEPH_OSD_OP_READ,
1250					    flags, snapc,
1251					    ci->i_truncate_seq,
1252					    ci->i_truncate_size,
1253					    false);
1254		if (IS_ERR(req)) {
1255			ret = PTR_ERR(req);
1256			break;
1257		}
1258
1259		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1260		if (len < 0) {
1261			ceph_osdc_put_request(req);
1262			ret = len;
1263			break;
1264		}
1265		if (len != size)
1266			osd_req_op_extent_update(req, 0, len);
1267
1268		/*
 1269		 * To simplify error handling, allow AIO only when the IO is
 1270		 * within i_size or can be satisfied by a single OSD request.
1271		 */
1272		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1273		    (len == count || pos + count <= i_size_read(inode))) {
1274			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1275			if (aio_req) {
1276				aio_req->iocb = iocb;
1277				aio_req->write = write;
1278				aio_req->should_dirty = should_dirty;
1279				INIT_LIST_HEAD(&aio_req->osd_reqs);
1280				if (write) {
1281					aio_req->mtime = mtime;
1282					swap(aio_req->prealloc_cf, *pcf);
1283				}
1284			}
1285			/* ignore error */
1286		}
1287
1288		if (write) {
1289			/*
1290			 * throw out any page cache pages in this range. this
1291			 * may block.
1292			 */
1293			truncate_inode_pages_range(inode->i_mapping, pos,
1294						   PAGE_ALIGN(pos + len) - 1);
1295
1296			req->r_mtime = mtime;
1297		}
1298
1299		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1300
1301		if (aio_req) {
1302			aio_req->total_len += len;
1303			aio_req->num_reqs++;
1304			atomic_inc(&aio_req->pending_reqs);
1305
1306			req->r_callback = ceph_aio_complete_req;
1307			req->r_inode = inode;
1308			req->r_priv = aio_req;
1309			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1310
1311			pos += len;
1312			continue;
1313		}
1314
1315		ret = ceph_osdc_start_request(req->r_osdc, req, false);
1316		if (!ret)
1317			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1318
1319		if (write)
1320			ceph_update_write_latency(metric, req->r_start_latency,
1321						  req->r_end_latency, ret);
1322		else
1323			ceph_update_read_latency(metric, req->r_start_latency,
1324						 req->r_end_latency, ret);
1325
1326		size = i_size_read(inode);
1327		if (!write) {
1328			if (ret == -ENOENT)
1329				ret = 0;
1330			if (ret >= 0 && ret < len && pos + ret < size) {
1331				struct iov_iter i;
1332				int zlen = min_t(size_t, len - ret,
1333						 size - pos - ret);
1334
1335				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1336				iov_iter_advance(&i, ret);
1337				iov_iter_zero(zlen, &i);
1338				ret += zlen;
1339			}
1340			if (ret >= 0)
1341				len = ret;
1342		}
1343
1344		put_bvecs(bvecs, num_pages, should_dirty);
1345		ceph_osdc_put_request(req);
1346		if (ret < 0)
1347			break;
1348
1349		pos += len;
1350		if (!write && pos >= size)
1351			break;
1352
1353		if (write && pos > size) {
1354			if (ceph_inode_set_size(inode, pos))
1355				ceph_check_caps(ceph_inode(inode),
1356						CHECK_CAPS_AUTHONLY,
1357						NULL);
1358		}
1359	}
1360
1361	if (aio_req) {
1362		LIST_HEAD(osd_reqs);
1363
1364		if (aio_req->num_reqs == 0) {
1365			kfree(aio_req);
1366			return ret;
1367		}
1368
1369		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1370					      CEPH_CAP_FILE_RD);
1371
1372		list_splice(&aio_req->osd_reqs, &osd_reqs);
1373		inode_dio_begin(inode);
1374		while (!list_empty(&osd_reqs)) {
1375			req = list_first_entry(&osd_reqs,
1376					       struct ceph_osd_request,
1377					       r_private_item);
1378			list_del_init(&req->r_private_item);
1379			if (ret >= 0)
1380				ret = ceph_osdc_start_request(req->r_osdc,
1381							      req, false);
1382			if (ret < 0) {
1383				req->r_result = ret;
1384				ceph_aio_complete_req(req);
1385			}
1386		}
1387		return -EIOCBQUEUED;
1388	}
1389
1390	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1391		ret = pos - iocb->ki_pos;
1392		iocb->ki_pos = pos;
1393	}
1394	return ret;
1395}
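/*
 * Sizing sketch with hypothetical mount options: given wsize=16M, a 40M
 * O_DIRECT write enters the loop above with 16M chunks, each further
 * clipped by ceph_osdc_new_request() to the object/stripe boundary, so
 * a 4M-object layout really issues 4M sub-requests, each with its own
 * bio_vec array from iter_get_bvecs_alloc().
 */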
1396
1397/*
1398 * Synchronous write, straight from __user pointer or user pages.
1399 *
1400 * If write spans object boundary, just do multiple writes.  (For a
1401 * correct atomic write, we should e.g. take write locks on all
1402 * objects, rollback on failure, etc.)
1403 */
1404static ssize_t
1405ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1406		struct ceph_snap_context *snapc)
1407{
1408	struct file *file = iocb->ki_filp;
1409	struct inode *inode = file_inode(file);
1410	struct ceph_inode_info *ci = ceph_inode(inode);
1411	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1412	struct ceph_vino vino;
1413	struct ceph_osd_request *req;
1414	struct page **pages;
1415	u64 len;
1416	int num_pages;
1417	int written = 0;
1418	int flags;
1419	int ret;
1420	bool check_caps = false;
1421	struct timespec64 mtime = current_time(inode);
1422	size_t count = iov_iter_count(from);
1423
1424	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1425		return -EROFS;
1426
1427	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1428	     file, pos, (unsigned)count, snapc, snapc->seq);
1429
1430	ret = filemap_write_and_wait_range(inode->i_mapping,
1431					   pos, pos + count - 1);
1432	if (ret < 0)
1433		return ret;
1434
1435	ret = invalidate_inode_pages2_range(inode->i_mapping,
1436					    pos >> PAGE_SHIFT,
1437					    (pos + count - 1) >> PAGE_SHIFT);
1438	if (ret < 0)
1439		dout("invalidate_inode_pages2_range returned %d\n", ret);
1440
1441	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1442
1443	while ((len = iov_iter_count(from)) > 0) {
1444		size_t left;
1445		int n;
1446
1447		vino = ceph_vino(inode);
1448		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1449					    vino, pos, &len, 0, 1,
1450					    CEPH_OSD_OP_WRITE, flags, snapc,
1451					    ci->i_truncate_seq,
1452					    ci->i_truncate_size,
1453					    false);
1454		if (IS_ERR(req)) {
1455			ret = PTR_ERR(req);
1456			break;
1457		}
1458
1459		/*
1460		 * write from beginning of first page,
1461		 * regardless of io alignment
1462		 */
1463		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1464
1465		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1466		if (IS_ERR(pages)) {
1467			ret = PTR_ERR(pages);
1468			goto out;
1469		}
1470
1471		left = len;
1472		for (n = 0; n < num_pages; n++) {
1473			size_t plen = min_t(size_t, left, PAGE_SIZE);
1474			ret = copy_page_from_iter(pages[n], 0, plen, from);
1475			if (ret != plen) {
1476				ret = -EFAULT;
1477				break;
1478			}
1479			left -= ret;
1480		}
1481
1482		if (ret < 0) {
1483			ceph_release_page_vector(pages, num_pages);
1484			goto out;
1485		}
1486
1487		req->r_inode = inode;
1488
1489		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1490						false, true);
1491
1492		req->r_mtime = mtime;
1493		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1494		if (!ret)
1495			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1496
1497		ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
1498					  req->r_end_latency, ret);
1499out:
1500		ceph_osdc_put_request(req);
1501		if (ret != 0) {
1502			ceph_set_error_write(ci);
1503			break;
1504		}
1505
1506		ceph_clear_error_write(ci);
1507		pos += len;
1508		written += len;
1509		if (pos > i_size_read(inode)) {
1510			check_caps = ceph_inode_set_size(inode, pos);
1511			if (check_caps)
1512				ceph_check_caps(ceph_inode(inode),
1513						CHECK_CAPS_AUTHONLY,
1514						NULL);
1515		}
1516
1517	}
1518
1519	if (ret != -EOLDSNAPC && written > 0) {
1520		ret = written;
1521		iocb->ki_pos = pos;
1522	}
1523	return ret;
1524}
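/*
 * Staging example for the copy loop above (assuming 4K pages): a
 * 5000-byte write allocates two pages and copies 4096 + 904 bytes into
 * them; the OSD data is attached at page offset 0 regardless of the
 * file position, since each extent is written from the beginning of
 * its first page.
 */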
1525
1526/*
1527 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1528 * Atomically grab references, so that those bits are not released
1529 * back to the MDS mid-read.
1530 *
1531 * Hmm, the sync read case isn't actually async... should it be?
1532 */
1533static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1534{
1535	struct file *filp = iocb->ki_filp;
1536	struct ceph_file_info *fi = filp->private_data;
1537	size_t len = iov_iter_count(to);
1538	struct inode *inode = file_inode(filp);
1539	struct ceph_inode_info *ci = ceph_inode(inode);
1540	struct page *pinned_page = NULL;
1541	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1542	ssize_t ret;
1543	int want, got = 0;
1544	int retry_op = 0, read = 0;
1545
1546again:
1547	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1548	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1549
1550	if (direct_lock)
1551		ceph_start_io_direct(inode);
1552	else
1553		ceph_start_io_read(inode);
1554
1555	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1556		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1557	else
1558		want = CEPH_CAP_FILE_CACHE;
1559	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
1560			    &got, &pinned_page);
1561	if (ret < 0) {
1562		if (iocb->ki_flags & IOCB_DIRECT)
1563			ceph_end_io_direct(inode);
1564		else
1565			ceph_end_io_read(inode);
1566		return ret;
1567	}
1568
1569	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1570	    (iocb->ki_flags & IOCB_DIRECT) ||
1571	    (fi->flags & CEPH_F_SYNC)) {
1572
1573		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1574		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1575		     ceph_cap_string(got));
1576
1577		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1578			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1579				ret = ceph_direct_read_write(iocb, to,
1580							     NULL, NULL);
1581				if (ret >= 0 && ret < len)
1582					retry_op = CHECK_EOF;
1583			} else {
1584				ret = ceph_sync_read(iocb, to, &retry_op);
1585			}
1586		} else {
1587			retry_op = READ_INLINE;
1588		}
1589	} else {
1590		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1591		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1592		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1593		     ceph_cap_string(got));
1594		ceph_add_rw_context(fi, &rw_ctx);
1595		ret = generic_file_read_iter(iocb, to);
1596		ceph_del_rw_context(fi, &rw_ctx);
1597	}
1598
1599	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1600	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1601	if (pinned_page) {
1602		put_page(pinned_page);
1603		pinned_page = NULL;
1604	}
1605	ceph_put_cap_refs(ci, got);
1606
1607	if (direct_lock)
1608		ceph_end_io_direct(inode);
1609	else
1610		ceph_end_io_read(inode);
1611
1612	if (retry_op > HAVE_RETRIED && ret >= 0) {
1613		int statret;
1614		struct page *page = NULL;
1615		loff_t i_size;
1616		if (retry_op == READ_INLINE) {
1617			page = __page_cache_alloc(GFP_KERNEL);
1618			if (!page)
1619				return -ENOMEM;
1620		}
1621
1622		statret = __ceph_do_getattr(inode, page,
1623					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1624		if (statret < 0) {
1625			if (page)
1626				__free_page(page);
1627			if (statret == -ENODATA) {
1628				BUG_ON(retry_op != READ_INLINE);
1629				goto again;
1630			}
1631			return statret;
1632		}
1633
1634		i_size = i_size_read(inode);
1635		if (retry_op == READ_INLINE) {
1636			BUG_ON(ret > 0 || read > 0);
1637			if (iocb->ki_pos < i_size &&
1638			    iocb->ki_pos < PAGE_SIZE) {
1639				loff_t end = min_t(loff_t, i_size,
1640						   iocb->ki_pos + len);
1641				end = min_t(loff_t, end, PAGE_SIZE);
1642				if (statret < end)
1643					zero_user_segment(page, statret, end);
1644				ret = copy_page_to_iter(page,
1645						iocb->ki_pos & ~PAGE_MASK,
1646						end - iocb->ki_pos, to);
1647				iocb->ki_pos += ret;
1648				read += ret;
1649			}
1650			if (iocb->ki_pos < i_size && read < len) {
1651				size_t zlen = min_t(size_t, len - read,
1652						    i_size - iocb->ki_pos);
1653				ret = iov_iter_zero(zlen, to);
1654				iocb->ki_pos += ret;
1655				read += ret;
1656			}
1657			__free_pages(page, 0);
1658			return read;
1659		}
1660
1661		/* hit EOF or hole? */
1662		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1663		    ret < len) {
1664			dout("sync_read hit hole, ppos %lld < size %lld"
1665			     ", reading more\n", iocb->ki_pos, i_size);
1666
1667			read += ret;
1668			len -= ret;
1669			retry_op = HAVE_RETRIED;
1670			goto again;
1671		}
1672	}
1673
1674	if (ret >= 0)
1675		ret += read;
1676
1677	return ret;
1678}
1679
1680/*
1681 * Take cap references to avoid releasing caps to MDS mid-write.
1682 *
1683 * If we are synchronous, and write with an old snap context, the OSD
1684 * may return EOLDSNAPC.  In that case, retry the write.. _after_
1685 * dropping our cap refs and allowing the pending snap to logically
1686 * complete _before_ this write occurs.
1687 *
1688 * If we are near ENOSPC, write synchronously.
1689 */
1690static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1691{
1692	struct file *file = iocb->ki_filp;
1693	struct ceph_file_info *fi = file->private_data;
1694	struct inode *inode = file_inode(file);
1695	struct ceph_inode_info *ci = ceph_inode(inode);
1696	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1697	struct ceph_osd_client *osdc = &fsc->client->osdc;
1698	struct ceph_cap_flush *prealloc_cf;
1699	ssize_t count, written = 0;
1700	int err, want, got;
1701	bool direct_lock = false;
1702	u32 map_flags;
1703	u64 pool_flags;
1704	loff_t pos;
1705	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1706
1707	if (ceph_snap(inode) != CEPH_NOSNAP)
1708		return -EROFS;
1709
1710	prealloc_cf = ceph_alloc_cap_flush();
1711	if (!prealloc_cf)
1712		return -ENOMEM;
1713
1714	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1715		direct_lock = true;
1716
1717retry_snap:
1718	if (direct_lock)
1719		ceph_start_io_direct(inode);
1720	else
1721		ceph_start_io_write(inode);
1722
1723	/* We can write back this queue in page reclaim */
1724	current->backing_dev_info = inode_to_bdi(inode);
1725
1726	if (iocb->ki_flags & IOCB_APPEND) {
1727		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1728		if (err < 0)
1729			goto out;
1730	}
1731
1732	err = generic_write_checks(iocb, from);
1733	if (err <= 0)
1734		goto out;
1735
1736	pos = iocb->ki_pos;
1737	if (unlikely(pos >= limit)) {
1738		err = -EFBIG;
1739		goto out;
1740	} else {
1741		iov_iter_truncate(from, limit - pos);
1742	}
1743
1744	count = iov_iter_count(from);
1745	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1746		err = -EDQUOT;
1747		goto out;
1748	}
1749
1750	err = file_remove_privs(file);
1751	if (err)
1752		goto out;
1753
1754	err = file_update_time(file);
1755	if (err)
1756		goto out;
1757
1758	inode_inc_iversion_raw(inode);
1759
1760	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1761		err = ceph_uninline_data(file, NULL);
1762		if (err < 0)
1763			goto out;
1764	}
1765
1766	down_read(&osdc->lock);
1767	map_flags = osdc->osdmap->flags;
1768	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1769	up_read(&osdc->lock);
1770	if ((map_flags & CEPH_OSDMAP_FULL) ||
1771	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
1772		err = -ENOSPC;
1773		goto out;
1774	}
1775
1776	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1777	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1778	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1779		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1780	else
1781		want = CEPH_CAP_FILE_BUFFER;
1782	got = 0;
1783	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
1784			    &got, NULL);
1785	if (err < 0)
1786		goto out;
1787
1788	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1789	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1790
1791	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1792	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1793	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1794		struct ceph_snap_context *snapc;
1795		struct iov_iter data;
1796
1797		spin_lock(&ci->i_ceph_lock);
1798		if (__ceph_have_pending_cap_snap(ci)) {
1799			struct ceph_cap_snap *capsnap =
1800					list_last_entry(&ci->i_cap_snaps,
1801							struct ceph_cap_snap,
1802							ci_item);
1803			snapc = ceph_get_snap_context(capsnap->context);
1804		} else {
1805			BUG_ON(!ci->i_head_snapc);
1806			snapc = ceph_get_snap_context(ci->i_head_snapc);
1807		}
1808		spin_unlock(&ci->i_ceph_lock);
1809
1810		/* we might need to revert back to that point */
1811		data = *from;
1812		if (iocb->ki_flags & IOCB_DIRECT)
1813			written = ceph_direct_read_write(iocb, &data, snapc,
1814							 &prealloc_cf);
1815		else
1816			written = ceph_sync_write(iocb, &data, pos, snapc);
1817		if (direct_lock)
1818			ceph_end_io_direct(inode);
1819		else
1820			ceph_end_io_write(inode);
1821		if (written > 0)
1822			iov_iter_advance(from, written);
1823		ceph_put_snap_context(snapc);
1824	} else {
1825		/*
 1826		 * No need to acquire the i_truncate_mutex.  The MDS
 1827		 * revokes Fwb caps before sending a truncate message
 1828		 * to us, and we can't get the Fwb cap while there is
 1829		 * a pending vmtruncate, so write and vmtruncate
 1830		 * cannot run at the same time.
1831		 */
1832		written = generic_perform_write(file, from, pos);
1833		if (likely(written >= 0))
1834			iocb->ki_pos = pos + written;
1835		ceph_end_io_write(inode);
1836	}
1837
1838	if (written >= 0) {
1839		int dirty;
1840
1841		spin_lock(&ci->i_ceph_lock);
1842		ci->i_inline_version = CEPH_INLINE_NONE;
1843		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1844					       &prealloc_cf);
1845		spin_unlock(&ci->i_ceph_lock);
1846		if (dirty)
1847			__mark_inode_dirty(inode, dirty);
1848		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1849			ceph_check_caps(ci, 0, NULL);
1850	}
1851
 1852	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1853	     inode, ceph_vinop(inode), pos, (unsigned)count,
1854	     ceph_cap_string(got));
1855	ceph_put_cap_refs(ci, got);
1856
1857	if (written == -EOLDSNAPC) {
 1858		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1859		     inode, ceph_vinop(inode), pos, (unsigned)count);
1860		goto retry_snap;
1861	}
1862
1863	if (written >= 0) {
1864		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1865		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1866			iocb->ki_flags |= IOCB_DSYNC;
1867		written = generic_write_sync(iocb, written);
1868	}
1869
1870	goto out_unlocked;
1871out:
1872	if (direct_lock)
1873		ceph_end_io_direct(inode);
1874	else
1875		ceph_end_io_write(inode);
1876out_unlocked:
1877	ceph_free_cap_flush(prealloc_cf);
1878	current->backing_dev_info = NULL;
1879	return written ? written : err;
1880}
1881
1882/*
1883 * llseek.  be sure to verify file size on SEEK_END.
1884 */
1885static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1886{
1887	struct inode *inode = file->f_mapping->host;
1888	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1889	loff_t i_size;
1890	loff_t ret;
1891
1892	inode_lock(inode);
1893
1894	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1895		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1896		if (ret < 0)
1897			goto out;
1898	}
1899
1900	i_size = i_size_read(inode);
1901	switch (whence) {
1902	case SEEK_END:
1903		offset += i_size;
1904		break;
1905	case SEEK_CUR:
1906		/*
1907		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1908		 * position-querying operation.  Avoid rewriting the "same"
1909		 * f_pos value back to the file because a concurrent read(),
1910		 * write() or lseek() might have altered it
1911		 */
1912		if (offset == 0) {
1913			ret = file->f_pos;
1914			goto out;
1915		}
1916		offset += file->f_pos;
1917		break;
1918	case SEEK_DATA:
1919		if (offset < 0 || offset >= i_size) {
1920			ret = -ENXIO;
1921			goto out;
1922		}
1923		break;
1924	case SEEK_HOLE:
1925		if (offset < 0 || offset >= i_size) {
1926			ret = -ENXIO;
1927			goto out;
1928		}
1929		offset = i_size;
1930		break;
1931	}
1932
1933	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1934
1935out:
1936	inode_unlock(inode);
1937	return ret;
1938}
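/*
 * Note that SEEK_DATA/SEEK_HOLE above are the trivial implementations:
 * the whole range [0, i_size) is treated as data, so SEEK_DATA returns
 * the offset unchanged and SEEK_HOLE always lands on i_size.
 */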
1939
1940static inline void ceph_zero_partial_page(
1941	struct inode *inode, loff_t offset, unsigned size)
1942{
1943	struct page *page;
1944	pgoff_t index = offset >> PAGE_SHIFT;
1945
1946	page = find_lock_page(inode->i_mapping, index);
1947	if (page) {
1948		wait_on_page_writeback(page);
1949		zero_user(page, offset & (PAGE_SIZE - 1), size);
1950		unlock_page(page);
1951		put_page(page);
1952	}
1953}
1954
1955static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1956				      loff_t length)
1957{
1958	loff_t nearly = round_up(offset, PAGE_SIZE);
1959	if (offset < nearly) {
1960		loff_t size = nearly - offset;
1961		if (length < size)
1962			size = length;
1963		ceph_zero_partial_page(inode, offset, size);
1964		offset += size;
1965		length -= size;
1966	}
1967	if (length >= PAGE_SIZE) {
1968		loff_t size = round_down(length, PAGE_SIZE);
1969		truncate_pagecache_range(inode, offset, offset + size - 1);
1970		offset += size;
1971		length -= size;
1972	}
1973	if (length)
1974		ceph_zero_partial_page(inode, offset, length);
1975}
1976
1977static int ceph_zero_partial_object(struct inode *inode,
1978				    loff_t offset, loff_t *length)
1979{
1980	struct ceph_inode_info *ci = ceph_inode(inode);
1981	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1982	struct ceph_osd_request *req;
1983	int ret = 0;
1984	loff_t zero = 0;
1985	int op;
1986
1987	if (!length) {
1988		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1989		length = &zero;
1990	} else {
1991		op = CEPH_OSD_OP_ZERO;
1992	}
1993
1994	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1995					ceph_vino(inode),
1996					offset, length,
1997					0, 1, op,
1998					CEPH_OSD_FLAG_WRITE,
1999					NULL, 0, 0, false);
2000	if (IS_ERR(req)) {
2001		ret = PTR_ERR(req);
2002		goto out;
2003	}
2004
2005	req->r_mtime = inode->i_mtime;
2006	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
2007	if (!ret) {
2008		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2009		if (ret == -ENOENT)
2010			ret = 0;
2011	}
2012	ceph_osdc_put_request(req);
2013
2014out:
2015	return ret;
2016}
2017
2018static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2019{
2020	int ret = 0;
2021	struct ceph_inode_info *ci = ceph_inode(inode);
2022	s32 stripe_unit = ci->i_layout.stripe_unit;
2023	s32 stripe_count = ci->i_layout.stripe_count;
2024	s32 object_size = ci->i_layout.object_size;
2025	u64 object_set_size = object_size * stripe_count;
2026	u64 nearly, t;
2027
2028	/* round offset up to next period boundary */
2029	nearly = offset + object_set_size - 1;
2030	t = nearly;
2031	nearly -= do_div(t, object_set_size);
2032
2033	while (length && offset < nearly) {
2034		loff_t size = length;
2035		ret = ceph_zero_partial_object(inode, offset, &size);
2036		if (ret < 0)
2037			return ret;
2038		offset += size;
2039		length -= size;
2040	}
2041	while (length >= object_set_size) {
2042		int i;
2043		loff_t pos = offset;
2044		for (i = 0; i < stripe_count; ++i) {
2045			ret = ceph_zero_partial_object(inode, pos, NULL);
2046			if (ret < 0)
2047				return ret;
2048			pos += stripe_unit;
2049		}
2050		offset += object_set_size;
2051		length -= object_set_size;
2052	}
2053	while (length) {
2054		loff_t size = length;
2055		ret = ceph_zero_partial_object(inode, offset, &size);
2056		if (ret < 0)
2057			return ret;
2058		offset += size;
2059		length -= size;
2060	}
2061	return ret;
2062}
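
/*
 * Example of the period rounding above, for a hypothetical striped
 * layout with stripe_unit = 1M, stripe_count = 4, object_size = 4M
 * (so object_set_size = 16M) and offset = 5M:
 *
 *	nearly = 5M + 16M - 1 = 21M - 1
 *	do_div() leaves the remainder 5M - 1, so nearly = 16M
 *
 * i.e. the first loop zeroes object by object up to the next 16M
 * period boundary, the middle loop truncates/deletes whole object
 * sets, and the last loop handles the partial tail.
 */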
2063
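/*
 * fallocate(2) entry point.  Only hole punching that preserves the
 * file size is supported, i.e. (illustrative userspace call):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * Everything else (preallocation, FALLOC_FL_ZERO_RANGE, ...) gets
 * -EOPNOTSUPP, as RADOS has no way to reserve space for future writes.
 */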
2064static long ceph_fallocate(struct file *file, int mode,
2065				loff_t offset, loff_t length)
2066{
2067	struct ceph_file_info *fi = file->private_data;
2068	struct inode *inode = file_inode(file);
2069	struct ceph_inode_info *ci = ceph_inode(inode);
2070	struct ceph_cap_flush *prealloc_cf;
2071	int want, got = 0;
2072	int dirty;
2073	int ret = 0;
2074	loff_t endoff = 0;
2075	loff_t size;
2076
2077	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2078		return -EOPNOTSUPP;
2079
2080	if (!S_ISREG(inode->i_mode))
2081		return -EOPNOTSUPP;
2082
2083	prealloc_cf = ceph_alloc_cap_flush();
2084	if (!prealloc_cf)
2085		return -ENOMEM;
2086
2087	inode_lock(inode);
2088
2089	if (ceph_snap(inode) != CEPH_NOSNAP) {
2090		ret = -EROFS;
2091		goto unlock;
2092	}
2093
2094	if (ci->i_inline_version != CEPH_INLINE_NONE) {
2095		ret = ceph_uninline_data(file, NULL);
2096		if (ret < 0)
2097			goto unlock;
2098	}
2099
2100	size = i_size_read(inode);
2101
2102	/* Are we punching a hole beyond EOF? */
2103	if (offset >= size)
2104		goto unlock;
2105	if ((offset + length) > size)
2106		length = size - offset;
2107
2108	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2109		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2110	else
2111		want = CEPH_CAP_FILE_BUFFER;
2112
2113	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
2114	if (ret < 0)
2115		goto unlock;
2116
2117	ceph_zero_pagecache_range(inode, offset, length);
2118	ret = ceph_zero_objects(inode, offset, length);
2119
2120	if (!ret) {
2121		spin_lock(&ci->i_ceph_lock);
2122		ci->i_inline_version = CEPH_INLINE_NONE;
2123		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2124					       &prealloc_cf);
2125		spin_unlock(&ci->i_ceph_lock);
2126		if (dirty)
2127			__mark_inode_dirty(inode, dirty);
2128	}
2129
2130	ceph_put_cap_refs(ci, got);
2131unlock:
2132	inode_unlock(inode);
2133	ceph_free_cap_flush(prealloc_cf);
2134	return ret;
2135}
2136
2137/*
2138 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2139 * src_ci.  Two attempts are made to obtain both caps, and an error is returned if
2140 * this fails; zero is returned on success.
2141 */
2142static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2143			  struct file *dst_filp,
2144			  loff_t dst_endoff, int *dst_got)
2145{
2146	int ret = 0;
2147	bool retrying = false;
2148
2149retry_caps:
2150	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2151			    dst_endoff, dst_got, NULL);
2152	if (ret < 0)
2153		return ret;
2154
2155	/*
2156	 * Since we're already holding the FILE_WR capability for the dst file,
2157	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2158	 * retry dance instead to try to get both capabilities.
2159	 */
2160	ret = ceph_try_get_caps(file_inode(src_filp),
2161				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2162				false, src_got);
2163	if (ret <= 0) {
2164		/* Start by dropping dst_ci caps and getting src_ci caps */
2165		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2166		if (retrying) {
2167			if (!ret)
2168				/* ceph_try_get_caps masks EAGAIN */
2169				ret = -EAGAIN;
2170			return ret;
2171		}
2172		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2173				    CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
2174		if (ret < 0)
2175			return ret;
2176		/*... drop src_ci caps too, and retry */
2177		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2178		retrying = true;
2179		goto retry_caps;
2180	}
2181	return ret;
2182}
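
/*
 * The retry dance above, spelled out:
 *
 *	1. take Fw on dst (may block)
 *	2. try-take Fr on src without blocking
 *	3. on failure, drop the dst caps, wait for the src caps with a
 *	   blocking get, drop them again, and retry once from step 1
 *
 * Blocking in step 2 while still holding the dst caps could deadlock
 * against another client copying in the opposite direction.
 */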
2183
2184static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2185			   struct ceph_inode_info *dst_ci, int dst_got)
2186{
2187	ceph_put_cap_refs(src_ci, src_got);
2188	ceph_put_cap_refs(dst_ci, dst_got);
2189}
2190
2191/*
2192 * This function does several size-related checks, returning an error if:
2193 *  - source file is smaller than off+len
2194 *  - destination file size is not OK (inode_newsize_ok())
2195 *  - the max-bytes quota is exceeded
2196 */
2197static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2198			   loff_t src_off, loff_t dst_off, size_t len)
2199{
2200	loff_t size, endoff;
2201
2202	size = i_size_read(src_inode);
2203	/*
2204	 * Don't copy beyond source file EOF.  Instead of simply setting length
2205	 * to (size - src_off), just drop to VFS default implementation, as the
2206	 * local i_size may be stale due to other clients writing to the source
2207	 * inode.
2208	 */
2209	if (src_off + len > size) {
2210		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2211		     src_off, len, size);
2212		return -EOPNOTSUPP;
2213	}
2214	size = i_size_read(dst_inode);
2215
2216	endoff = dst_off + len;
2217	if (inode_newsize_ok(dst_inode, endoff))
2218		return -EOPNOTSUPP;
2219
2220	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2221		return -EDQUOT;
2222
2223	return 0;
2224}
2225
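/*
 * Copy full objects from src to dst with the OSD 'copy-from2'
 * operation.  Data objects are named "<ino hex>.<objnum hex>" as built
 * by ceph_oid_printf() below; e.g. (illustrative values) object 5 of
 * inode 0x10000000000 is "10000000000.00000005".  Returns the number
 * of bytes copied, or a negative error if the very first copy fails.
 */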
2226static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2227				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2228				    struct ceph_fs_client *fsc,
2229				    size_t len, unsigned int flags)
2230{
2231	struct ceph_object_locator src_oloc, dst_oloc;
2232	struct ceph_object_id src_oid, dst_oid;
2233	size_t bytes = 0;
2234	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2235	u32 src_objlen, dst_objlen;
2236	u32 object_size = src_ci->i_layout.object_size;
2237	int ret;
2238
2239	src_oloc.pool = src_ci->i_layout.pool_id;
2240	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2241	dst_oloc.pool = dst_ci->i_layout.pool_id;
2242	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2243
2244	while (len >= object_size) {
2245		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2246					      object_size, &src_objnum,
2247					      &src_objoff, &src_objlen);
2248		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2249					      object_size, &dst_objnum,
2250					      &dst_objoff, &dst_objlen);
2251		ceph_oid_init(&src_oid);
2252		ceph_oid_printf(&src_oid, "%llx.%08llx",
2253				src_ci->i_vino.ino, src_objnum);
2254		ceph_oid_init(&dst_oid);
2255		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2256				dst_ci->i_vino.ino, dst_objnum);
2257		/* Do an object remote copy */
2258		ret = ceph_osdc_copy_from(&fsc->client->osdc,
2259					  src_ci->i_vino.snap, 0,
2260					  &src_oid, &src_oloc,
2261					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2262					  CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2263					  &dst_oid, &dst_oloc,
2264					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2265					  CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
2266					  dst_ci->i_truncate_seq,
2267					  dst_ci->i_truncate_size,
2268					  CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2269		if (ret) {
2270			if (ret == -EOPNOTSUPP) {
2271				fsc->have_copy_from2 = false;
2272				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2273			}
2274			dout("ceph_osdc_copy_from returned %d\n", ret);
2275			if (!bytes)
2276				bytes = ret;
2277			goto out;
2278		}
2279		len -= object_size;
2280		bytes += object_size;
2281		*src_off += object_size;
2282		*dst_off += object_size;
2283	}
2284
2285out:
2286	ceph_oloc_destroy(&src_oloc);
2287	ceph_oloc_destroy(&dst_oloc);
2288	return bytes;
2289}
2290
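/*
 * The copy_file_range implementation proper.  Overall flow:
 *
 *	1. reject cross-cluster copies (-EXDEV), snapshotted dst
 *	   (-EROFS), striped layouts and sub-object lengths
 *	   (-EOPNOTSUPP, which makes the VFS fall back)
 *	2. flush both files and take Fr/Fw caps
 *	3. splice a partial head so both offsets become object aligned
 *	4. offload full-object copies to the OSDs
 *	5. splice the partial tail, if any
 */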
2291static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2292				      struct file *dst_file, loff_t dst_off,
2293				      size_t len, unsigned int flags)
2294{
2295	struct inode *src_inode = file_inode(src_file);
2296	struct inode *dst_inode = file_inode(dst_file);
2297	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2298	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2299	struct ceph_cap_flush *prealloc_cf;
2300	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2301	loff_t size;
2302	ssize_t ret = -EIO, bytes;
2303	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2304	u32 src_objlen, dst_objlen;
2305	int src_got = 0, dst_got = 0, err, dirty;
2306
2307	if (src_inode->i_sb != dst_inode->i_sb) {
2308		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2309
2310		if (ceph_fsid_compare(&src_fsc->client->fsid,
2311				      &dst_fsc->client->fsid)) {
2312			dout("Copying files across clusters: src: %pU dst: %pU\n",
2313			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2314			return -EXDEV;
2315		}
2316	}
2317	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2318		return -EROFS;
2319
2320	/*
2321	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2322	 * fallback to the default VFS copy_file_range implementation.  This is
2323	 * desirable in several cases (for ex, the 'len' is smaller than the
2324 * desirable in several cases (e.g. when 'len' is smaller than the
2325 * object size, or whenever the generic copy would be more
2326 * efficient).
2327
2328	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2329		return -EOPNOTSUPP;
2330
2331	if (!src_fsc->have_copy_from2)
2332		return -EOPNOTSUPP;
2333
2334	/*
2335	 * Striped file layouts require that we copy partial objects, but the
2336	 * OSD copy-from operation only supports full-object copies.  Limit
2337	 * this to non-striped file layouts for now.
2338	 */
2339	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2340	    (src_ci->i_layout.stripe_count != 1) ||
2341	    (dst_ci->i_layout.stripe_count != 1) ||
2342	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2343		dout("Invalid src/dst files layout\n");
2344		return -EOPNOTSUPP;
2345	}
2346
2347	if (len < src_ci->i_layout.object_size)
2348		return -EOPNOTSUPP; /* no remote copy will be done */
2349
2350	prealloc_cf = ceph_alloc_cap_flush();
2351	if (!prealloc_cf)
2352		return -ENOMEM;
2353
2354	/* Start by sync'ing the source and destination files */
2355	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2356	if (ret < 0) {
2357		dout("failed to write src file (%zd)\n", ret);
2358		goto out;
2359	}
2360	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2361	if (ret < 0) {
2362		dout("failed to write dst file (%zd)\n", ret);
2363		goto out;
2364	}
2365
2366	/*
2367	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2368	 * clients may have dirty data in their caches.  And OSDs know nothing
2369	 * about caps, so they can't safely do the remote object copies.
2370	 */
2371	err = get_rd_wr_caps(src_file, &src_got,
2372			     dst_file, (dst_off + len), &dst_got);
2373	if (err < 0) {
2374		dout("get_rd_wr_caps returned %d\n", err);
2375		ret = -EOPNOTSUPP;
2376		goto out;
2377	}
2378
2379	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2380	if (ret < 0)
2381		goto out_caps;
2382
2383	/* Drop dst file cached pages */
2384	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2385					    dst_off >> PAGE_SHIFT,
2386					    (dst_off + len) >> PAGE_SHIFT);
2387	if (ret < 0) {
2388		dout("Failed to invalidate inode pages (%zd)\n", ret);
2389		ret = 0; /* XXX */
2390	}
2391	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2392				      src_ci->i_layout.object_size,
2393				      &src_objnum, &src_objoff, &src_objlen);
2394	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2395				      dst_ci->i_layout.object_size,
2396				      &dst_objnum, &dst_objoff, &dst_objlen);
2397	/* object-level offsets need to be the same */
2398	if (src_objoff != dst_objoff) {
2399		ret = -EOPNOTSUPP;
2400		goto out_caps;
2401	}
2402
2403	/*
2404	 * Do a manual copy if the object offset isn't object aligned.
2405	 * 'src_objlen' contains the bytes left until the end of the object,
2406		 * starting at src_off.
2407	 */
2408	if (src_objoff) {
2409		dout("Initial partial copy of %u bytes\n", src_objlen);
2410
2411		/*
2412		 * we need to temporarily drop all caps as we'll be calling
2413		 * {read,write}_iter, which will get caps again.
2414		 */
2415		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2416		ret = do_splice_direct(src_file, &src_off, dst_file,
2417				       &dst_off, src_objlen, flags);
2418		/* Abort on short copies or on error */
2419		if (ret < src_objlen) {
2420			dout("Failed partial copy (%zd)\n", ret);
2421			goto out;
2422		}
2423		len -= ret;
2424		err = get_rd_wr_caps(src_file, &src_got,
2425				     dst_file, (dst_off + len), &dst_got);
2426		if (err < 0)
2427			goto out;
2428		err = is_file_size_ok(src_inode, dst_inode,
2429				      src_off, dst_off, len);
2430		if (err < 0)
2431			goto out_caps;
2432	}
2433
2434	size = i_size_read(dst_inode);
2435	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2436				     src_fsc, len, flags);
2437	if (bytes <= 0) {
2438		if (!ret)
2439			ret = bytes;
2440		goto out_caps;
2441	}
2442	dout("Copied %zu bytes out of %zu\n", bytes, len);
2443	len -= bytes;
2444	ret += bytes;
2445
2446	file_update_time(dst_file);
2447	inode_inc_iversion_raw(dst_inode);
2448
2449	if (dst_off > size) {
2450		/* Let the MDS know about dst file size change */
2451		if (ceph_inode_set_size(dst_inode, dst_off) ||
2452		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2453			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2454	}
2455	/* Mark Fw dirty */
2456	spin_lock(&dst_ci->i_ceph_lock);
2457	dst_ci->i_inline_version = CEPH_INLINE_NONE;
2458	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2459	spin_unlock(&dst_ci->i_ceph_lock);
2460	if (dirty)
2461		__mark_inode_dirty(dst_inode, dirty);
2462
2463out_caps:
2464	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2465
2466	/*
2467	 * Do the final manual copy if we still have some bytes left, unless
2468	 * the remote object copies failed (in which case len >= object_size).
2469	 */
2470	if (len && (len < src_ci->i_layout.object_size)) {
2471		dout("Final partial copy of %zu bytes\n", len);
2472		bytes = do_splice_direct(src_file, &src_off, dst_file,
2473					 &dst_off, len, flags);
2474		if (bytes > 0)
2475			ret += bytes;
2476		else
2477			dout("Failed partial copy (%zd)\n", bytes);
2478	}
2479
2480out:
2481	ceph_free_cap_flush(prealloc_cf);
2482
2483	return ret;
2484}
2485
2486static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2487				    struct file *dst_file, loff_t dst_off,
2488				    size_t len, unsigned int flags)
2489{
2490	ssize_t ret;
2491
2492	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2493				     len, flags);
2494
2495	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2496		ret = generic_copy_file_range(src_file, src_off, dst_file,
2497					      dst_off, len, flags);
2498	return ret;
2499}
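
/*
 * Illustrative userspace call (hypothetical sizes) that the OSD-side
 * offload above can serve in full, assuming the default 4M object size
 * and object-aligned file offsets:
 *
 *	copy_file_range(src_fd, NULL, dst_fd, NULL, 8 << 20, 0);
 *
 * On -EOPNOTSUPP or -EXDEV the generic, splice-based implementation
 * takes over, so the offload stays transparent to userspace.
 */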
2500
2501const struct file_operations ceph_file_fops = {
2502	.open = ceph_open,
2503	.release = ceph_release,
2504	.llseek = ceph_llseek,
2505	.read_iter = ceph_read_iter,
2506	.write_iter = ceph_write_iter,
2507	.mmap = ceph_mmap,
2508	.fsync = ceph_fsync,
2509	.lock = ceph_lock,
2510	.setlease = simple_nosetlease,
2511	.flock = ceph_flock,
2512	.splice_read = generic_file_splice_read,
2513	.splice_write = iter_file_splice_write,
2514	.unlocked_ioctl = ceph_ioctl,
2515	.compat_ioctl = compat_ptr_ioctl,
2516	.fallocate	= ceph_fallocate,
2517	.copy_file_range = ceph_copy_file_range,
2518};