v4.10.11
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagev_size: len = %zu\n", size);
	return size;
}
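
/*
 * Illustrative example (not from the original source): with 4K pages,
 * an iter holding iovecs {base=0x10000, len=0x2000} and {base=0x30000,
 * len=0x1000} yields size 0x3000, because the first segment's tail and
 * the second segment's base both fall on page boundaries.  A misaligned
 * tail or successor base stops the scan at the current segment.
 */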

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages) {
		pages = vmalloc(sizeof(*pages) * npages);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
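
/*
 * Editorial note: the kmalloc-then-vmalloc fallback above is the
 * pre-kvmalloc() idiom for "try a physically contiguous allocation
 * first, then fall back to a virtually mapped one"; the v4.17 listing
 * below does the same job with a single kvmalloc_array() call.
 */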

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through: regular files share the setup below */
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	/*
	 * Caps are sufficient locally if no write caps are wanted or we
	 * hold the auth cap (same test as in ceph_open()).
	 */
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,	/* already retried once after a short read */
	CHECK_EOF =    2,	/* short read; re-verify i_size before retrying */
	READ_INLINE =  3,	/* data is inline in the MDS; fetch via getattr */
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
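
/*
 * Illustrative example (not from the original source): with i_size 16K
 * but only the first 4K of the object written, an 8K read at pos 0
 * comes back short (ret = 4K); since pos+ret is still below i_size the
 * remaining 4K is a hole and is zero-filled.  If instead i_size were
 * 6K, nothing is zeroed, 6K is returned, and *checkeof = CHECK_EOF lets
 * the caller re-verify EOF before treating the read as complete.
 */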

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
						off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	int write;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
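
/*
 * Editorial note: -EOLDSNAPC from the OSD means the write was submitted
 * with a snap context older than the one the OSD holds for the object.
 * The retry worker above re-issues the same extent op under the most
 * recent snap context (pending cap snap, else i_head_snapc), and runs
 * from a workqueue because allocating and starting a new request can
 * sleep, which the completion callback context must not do.
 */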

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
		unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);

		complete_all(&req->r_completion);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
void ceph_sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	if (!S_ISREG(inode->i_mode))
		return;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */

	req = list_last_entry(head, struct ceph_osd_request,
			      r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_done_completion);
		ceph_osdc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_first_entry(head, struct ceph_osd_request,
				       r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /*include a 'startsync' command*/
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, !write);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
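
/*
 * Editorial note: unlike ceph_direct_read_write(), which pins the
 * caller's pages and does I/O on them in place, this path copies user
 * data into freshly allocated kernel pages (copy_page_from_iter) before
 * issuing each OSD write, so the user buffer needs no page-pinning and
 * may be arbitrarily aligned.
 */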

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		current->journal_info = filp;
		ret = generic_file_read_iter(iocb, to);
		current->journal_info = NULL;
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
				"got EOLDSNAPC, retrying\n",
				inode, ceph_vinop(inode),
				pos, (unsigned)count);
			inode_lock(inode);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;

		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE |
					CEPH_OSD_FLAG_ONDISK,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
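
/*
 * Illustrative arithmetic (not from the original source): with the
 * default layout (object_size 4M, stripe_count 1), object_set_size is
 * 4M, so punching a 12M hole starting on a 4M boundary removes three
 * whole objects via the length==NULL path (CEPH_OSD_OP_DELETE, or
 * CEPH_OSD_OP_TRUNCATE at file offset 0), while a misaligned head or
 * tail is zeroed object-by-object with CEPH_OSD_OP_ZERO.
 */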

static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
				CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
};

v4.17
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
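
/*
 * Editorial note: the v4.10 code above sent the host's raw open flags
 * to the MDS (cpu_to_le32(flags) in prepare_open_request()).  Because
 * the numeric O_* values differ between architectures, v4.17 instead
 * translates them to the architecture-independent CEPH_O_* wire values
 * here before they go on the wire.
 */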

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}
 116
 117/*
 118 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 119 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 120 * page.
 121 *
 122 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 123 * Return the number of bytes in the created bio_vec array, or an error.
 124 */
 125static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 126				    struct bio_vec **bvecs, int *num_bvecs)

 127{
 128	struct bio_vec *bv;
 129	size_t orig_count = iov_iter_count(iter);
 130	ssize_t bytes;
 131	int npages;
 132
 133	iov_iter_truncate(iter, maxsize);
 134	npages = iov_iter_npages(iter, INT_MAX);
 135	iov_iter_reexpand(iter, orig_count);
 136
 137	/*
 138	 * __iter_get_bvecs() may populate only part of the array -- zero it
 139	 * out.
 140	 */
 141	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
 142	if (!bv)
 143		return -ENOMEM;
 144
 145	bytes = __iter_get_bvecs(iter, maxsize, bv);
 146	if (bytes < 0) {
 147		/*
 148		 * No pages were pinned -- just free the array.
 149		 */
 150		kvfree(bv);
 151		return bytes;
 152	}
 153
 154	*bvecs = bv;
 155	*num_bvecs = npages;
 156	return bytes;
 157}
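
/*
 * Typical use, sketched from ceph_direct_read_write() below:
 *
 *	struct bio_vec *bvecs;
 *	int num_bvecs;
 *	ssize_t len;
 *
 *	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
 *	if (len >= 0) {
 *		... attach bvecs to an OSD request, submit, wait ...
 *		put_bvecs(bvecs, num_bvecs, should_dirty);
 *	}
 *
 * Note that *num_bvecs is the allocated array size (npages), not the
 * count of entries actually filled; put_bvecs() checks bv_page for
 * NULL, which is why the array is zero-initialized above.
 */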
 158
 159static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 160{
 161	int i;
 162
 163	for (i = 0; i < num_bvecs; i++) {
 164		if (bvecs[i].bv_page) {
 165			if (should_dirty)
 166				set_page_dirty_lock(bvecs[i].bv_page);
 167			put_page(bvecs[i].bv_page);
 168		}
 169	}
 170	kvfree(bvecs);
 171}
 172
 173/*
 174 * Prepare an open request.  Preallocate ceph_cap to avoid an
 175 * inopportune ENOMEM later.
 176 */
 177static struct ceph_mds_request *
 178prepare_open_request(struct super_block *sb, int flags, int create_mode)
 179{
 180	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 181	struct ceph_mds_client *mdsc = fsc->mdsc;
 182	struct ceph_mds_request *req;
 183	int want_auth = USE_ANY_MDS;
 184	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
 185
 186	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
 187		want_auth = USE_AUTH_MDS;
 188
 189	req = ceph_mdsc_create_request(mdsc, op, want_auth);
 190	if (IS_ERR(req))
 191		goto out;
 192	req->r_fmode = ceph_flags_to_mode(flags);
 193	req->r_args.open.flags = ceph_flags_sys2wire(flags);
 194	req->r_args.open.mode = cpu_to_le32(create_mode);
 195out:
 196	return req;
 197}
 198
 199static int ceph_init_file_info(struct inode *inode, struct file *file,
 200					int fmode, bool isdir)
 201{
 202	struct ceph_file_info *fi;
 203
 204	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
 205			inode->i_mode, isdir ? "dir" : "regular");
 206	BUG_ON(inode->i_fop->release != ceph_release);
 207
 208	if (isdir) {
 209		struct ceph_dir_file_info *dfi =
 210			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
 211		if (!dfi) {
 212			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 213			return -ENOMEM;
 214		}
 215
 216		file->private_data = dfi;
 217		fi = &dfi->file_info;
 218		dfi->next_offset = 2;
 219		dfi->readdir_cache_idx = -1;
 220	} else {
 221		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
 222		if (!fi) {
 223			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 224			return -ENOMEM;
 225		}
 226
 227		file->private_data = fi;
 228	}
 229
 230	fi->fmode = fmode;
 231	spin_lock_init(&fi->rw_contexts_lock);
 232	INIT_LIST_HEAD(&fi->rw_contexts);
 233
 234	return 0;
 235}
 236
 237/*
 238 * initialize private struct file data.
 239 * if we fail, clean up by dropping fmode reference on the ceph_inode
 240 */
 241static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 242{
 243	int ret = 0;
 244
 245	switch (inode->i_mode & S_IFMT) {
 246	case S_IFREG:
 247		ceph_fscache_register_inode_cookie(inode);
 248		ceph_fscache_file_set_cookie(inode, file);
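		/* fall through */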
 249	case S_IFDIR:
 250		ret = ceph_init_file_info(inode, file, fmode,
 251						S_ISDIR(inode->i_mode));
 252		if (ret)
 253			return ret;
 254		break;
 255
 256	case S_IFLNK:
 257		dout("init_file %p %p 0%o (symlink)\n", inode, file,
 258		     inode->i_mode);
 259		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 260		break;
 261
 262	default:
 263		dout("init_file %p %p 0%o (special)\n", inode, file,
 264		     inode->i_mode);
 265		/*
 266		 * we need to drop the open ref now, since we don't
 267		 * have .release set to ceph_release.
 268		 */
 269		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
 270		BUG_ON(inode->i_fop->release == ceph_release);
 271
 272		/* call the proper open fop */
 273		ret = inode->i_fop->open(inode, file);
 274	}
 275	return ret;
 276}
 277
 278/*
 279 * try renew caps after session gets killed.
 280 */
 281int ceph_renew_caps(struct inode *inode)
 282{
 283	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 284	struct ceph_inode_info *ci = ceph_inode(inode);
 285	struct ceph_mds_request *req;
 286	int err, flags, wanted;
 287
 288	spin_lock(&ci->i_ceph_lock);
 289	wanted = __ceph_caps_file_wanted(ci);
 290	if (__ceph_is_any_real_caps(ci) &&
 291	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
 292		int issued = __ceph_caps_issued(ci, NULL);
 293		spin_unlock(&ci->i_ceph_lock);
 294		dout("renew caps %p want %s issued %s updating mds_wanted\n",
 295		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
 296		ceph_check_caps(ci, 0, NULL);
 297		return 0;
 298	}
 299	spin_unlock(&ci->i_ceph_lock);
 300
 301	flags = 0;
 302	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
 303		flags = O_RDWR;
 304	else if (wanted & CEPH_CAP_FILE_RD)
 305		flags = O_RDONLY;
 306	else if (wanted & CEPH_CAP_FILE_WR)
 307		flags = O_WRONLY;
 308#ifdef O_LAZY
 309	if (wanted & CEPH_CAP_FILE_LAZYIO)
 310		flags |= O_LAZY;
 311#endif
 312
 313	req = prepare_open_request(inode->i_sb, flags, 0);
 314	if (IS_ERR(req)) {
 315		err = PTR_ERR(req);
 316		goto out;
 317	}
 318
 319	req->r_inode = inode;
 320	ihold(inode);
 321	req->r_num_caps = 1;
 322	req->r_fmode = -1;
 323
 324	err = ceph_mdsc_do_request(mdsc, NULL, req);
 325	ceph_mdsc_put_request(req);
 326out:
 327	dout("renew caps %p open result=%d\n", inode, err);
 328	return err < 0 ? err : 0;
 329}
 330
 331/*
 332 * If we already have the requisite capabilities, we can satisfy
 333 * the open request locally (no need to request new caps from the
 334 * MDS).  We do, however, need to inform the MDS (asynchronously)
 335 * if our wanted caps set expands.
 336 */
 337int ceph_open(struct inode *inode, struct file *file)
 338{
 339	struct ceph_inode_info *ci = ceph_inode(inode);
 340	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 341	struct ceph_mds_client *mdsc = fsc->mdsc;
 342	struct ceph_mds_request *req;
 343	struct ceph_file_info *fi = file->private_data;
 344	int err;
 345	int flags, fmode, wanted;
 346
 347	if (fi) {
 348		dout("open file %p is already opened\n", file);
 349		return 0;
 350	}
 351
 352	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
 353	flags = file->f_flags & ~(O_CREAT|O_EXCL);
 354	if (S_ISDIR(inode->i_mode))
 355		flags = O_DIRECTORY;  /* mds likes to know */
 356
 357	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
 358	     ceph_vinop(inode), file, flags, file->f_flags);
 359	fmode = ceph_flags_to_mode(flags);
 360	wanted = ceph_caps_for_mode(fmode);
 361
 362	/* snapped files are read-only */
 363	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
 364		return -EROFS;
 365
 366	/* trivially open snapdir */
 367	if (ceph_snap(inode) == CEPH_SNAPDIR) {
 368		spin_lock(&ci->i_ceph_lock);
 369		__ceph_get_fmode(ci, fmode);
 370		spin_unlock(&ci->i_ceph_lock);
 371		return ceph_init_file(inode, file, fmode);
 372	}
 373
 374	/*
 375	 * No need to block if we have caps on the auth MDS (for
 376	 * write) or any MDS (for read).  Update wanted set
 377	 * asynchronously.
 378	 */
 379	spin_lock(&ci->i_ceph_lock);
 380	if (__ceph_is_any_real_caps(ci) &&
 381	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 382		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
 383		int issued = __ceph_caps_issued(ci, NULL);
 384
 385		dout("open %p fmode %d want %s issued %s using existing\n",
 386		     inode, fmode, ceph_cap_string(wanted),
 387		     ceph_cap_string(issued));
 388		__ceph_get_fmode(ci, fmode);
 389		spin_unlock(&ci->i_ceph_lock);
 390
 391		/* adjust wanted? */
 392		if ((issued & wanted) != wanted &&
 393		    (mds_wanted & wanted) != wanted &&
 394		    ceph_snap(inode) != CEPH_SNAPDIR)
 395			ceph_check_caps(ci, 0, NULL);
 396
 397		return ceph_init_file(inode, file, fmode);
 398	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 399		   (ci->i_snap_caps & wanted) == wanted) {
 400		__ceph_get_fmode(ci, fmode);
 401		spin_unlock(&ci->i_ceph_lock);
 402		return ceph_init_file(inode, file, fmode);
 403	}
 404
 405	spin_unlock(&ci->i_ceph_lock);
 406
 407	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 408	req = prepare_open_request(inode->i_sb, flags, 0);
 409	if (IS_ERR(req)) {
 410		err = PTR_ERR(req);
 411		goto out;
 412	}
 413	req->r_inode = inode;
 414	ihold(inode);
 415
 416	req->r_num_caps = 1;
 417	err = ceph_mdsc_do_request(mdsc, NULL, req);
 418	if (!err)
 419		err = ceph_init_file(inode, file, req->r_fmode);
 420	ceph_mdsc_put_request(req);
 421	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 422out:
 423	return err;
 424}
 425
 426
 427/*
 428 * Do a lookup + open with a single request.  If we get a non-existent
 429 * file or symlink, return 1 so the VFS can retry.
 430 */
 431int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 432		     struct file *file, unsigned flags, umode_t mode,
 433		     int *opened)
 434{
 435	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 436	struct ceph_mds_client *mdsc = fsc->mdsc;
 437	struct ceph_mds_request *req;
 438	struct dentry *dn;
 439	struct ceph_acls_info acls = {};
 440	int mask;
 441	int err;
 442
 443	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
 444	     dir, dentry, dentry,
 445	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 446
 447	if (dentry->d_name.len > NAME_MAX)
 448		return -ENAMETOOLONG;
 449
 450	if (flags & O_CREAT) {
 451		if (ceph_quota_is_max_files_exceeded(dir))
 452			return -EDQUOT;
 453		err = ceph_pre_init_acls(dir, &mode, &acls);
 454		if (err < 0)
 455			return err;
 456	}
 457
 458	/* do the open */
 459	req = prepare_open_request(dir->i_sb, flags, mode);
 460	if (IS_ERR(req)) {
 461		err = PTR_ERR(req);
 462		goto out_acl;
 463	}
 464	req->r_dentry = dget(dentry);
 465	req->r_num_caps = 2;
 466	if (flags & O_CREAT) {
 467		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
 468		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 469		if (acls.pagelist) {
 470			req->r_pagelist = acls.pagelist;
 471			acls.pagelist = NULL;
 472		}
 473	}
 474
  475	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
  476	if (ceph_security_xattr_wanted(dir))
  477		mask |= CEPH_CAP_XATTR_SHARED;
  478	req->r_args.open.mask = cpu_to_le32(mask);
 479
 480	req->r_parent = dir;
 481	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 482	err = ceph_mdsc_do_request(mdsc,
 483				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 484				   req);
 485	err = ceph_handle_snapdir(req, dentry, err);
 486	if (err)
 487		goto out_req;
 488
 489	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 490		err = ceph_handle_notrace_create(dir, dentry);
 491
 492	if (d_in_lookup(dentry)) {
 493		dn = ceph_finish_lookup(req, dentry, err);
 494		if (IS_ERR(dn))
 495			err = PTR_ERR(dn);
 496	} else {
 497		/* we were given a hashed negative dentry */
 498		dn = NULL;
 499	}
 500	if (err)
 501		goto out_req;
 502	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
 503		/* make vfs retry on splice, ENOENT, or symlink */
 504		dout("atomic_open finish_no_open on dn %p\n", dn);
 505		err = finish_no_open(file, dn);
 506	} else {
 507		dout("atomic_open finish_open on dn %p\n", dn);
 508		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
 509			ceph_init_inode_acls(d_inode(dentry), &acls);
 510			*opened |= FILE_CREATED;
 511		}
 512		err = finish_open(file, dentry, ceph_open, opened);
 513	}
 514out_req:
 515	if (!req->r_err && req->r_target_inode)
 516		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
 517	ceph_mdsc_put_request(req);
 518out_acl:
 519	ceph_release_acls_info(&acls);
 520	dout("atomic_open result=%d\n", err);
 521	return err;
 522}
 523
 524int ceph_release(struct inode *inode, struct file *file)
 525{
 526	struct ceph_inode_info *ci = ceph_inode(inode);
 527
 528	if (S_ISDIR(inode->i_mode)) {
 529		struct ceph_dir_file_info *dfi = file->private_data;
 530		dout("release inode %p dir file %p\n", inode, file);
 531		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 532
 533		ceph_put_fmode(ci, dfi->file_info.fmode);
 534
 535		if (dfi->last_readdir)
 536			ceph_mdsc_put_request(dfi->last_readdir);
 537		kfree(dfi->last_name);
 538		kfree(dfi->dir_info);
 539		kmem_cache_free(ceph_dir_file_cachep, dfi);
 540	} else {
 541		struct ceph_file_info *fi = file->private_data;
 542		dout("release inode %p regular file %p\n", inode, file);
 543		WARN_ON(!list_empty(&fi->rw_contexts));
 544
 545		ceph_put_fmode(ci, fi->fmode);
 546		kmem_cache_free(ceph_file_cachep, fi);
 547	}
 548
 549	/* wake up anyone waiting for caps on this inode */
 550	wake_up_all(&ci->i_cap_wq);
 551	return 0;
 552}
 553
 554enum {
 555	HAVE_RETRIED = 1,
 556	CHECK_EOF =    2,
 557	READ_INLINE =  3,
 558};
 559
 560/*
 561 * Read a range of bytes striped over one or more objects.  Iterate over
 562 * objects we stripe over.  (That's not atomic, but good enough for now.)
 563 *
 564 * If we get a short result from the OSD, check against i_size; we need to
 565 * only return a short read to the caller if we hit EOF.
 566 */
 567static int striped_read(struct inode *inode,
 568			u64 pos, u64 len,
 569			struct page **pages, int num_pages,
 570			int page_align, int *checkeof)
 571{
 572	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 573	struct ceph_inode_info *ci = ceph_inode(inode);
 574	u64 this_len;
 575	loff_t i_size;
 576	int page_idx;
 577	int ret, read = 0;
 578	bool hit_stripe, was_short;
 579
 580	/*
 581	 * we may need to do multiple reads.  not atomic, unfortunately.
 582	 */
 583more:
 584	this_len = len;
 585	page_idx = (page_align + read) >> PAGE_SHIFT;
 586	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
 587				  &ci->i_layout, pos, &this_len,
 588				  ci->i_truncate_seq, ci->i_truncate_size,
 589				  pages + page_idx, num_pages - page_idx,
 590				  ((page_align + read) & ~PAGE_MASK));
 591	if (ret == -ENOENT)
 592		ret = 0;
 593	hit_stripe = this_len < len;
 594	was_short = ret >= 0 && ret < this_len;
 595	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
 596	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 597
 598	i_size = i_size_read(inode);
 599	if (ret >= 0) {
 600		if (was_short && (pos + ret < i_size)) {
 601			int zlen = min(this_len - ret, i_size - pos - ret);
 602			int zoff = page_align + read + ret;
 603			dout(" zero gap %llu to %llu\n",
 604			     pos + ret, pos + ret + zlen);
 605			ceph_zero_page_vector_range(zoff, zlen, pages);
 606			ret += zlen;
 607		}
 608
 609		read += ret;
 610		pos += ret;
 611		len -= ret;
 612
  613		/* hit a stripe boundary and need to continue */
 614		if (len && hit_stripe && pos < i_size)
 615			goto more;
 616	}
 617
 618	if (read > 0) {
 619		ret = read;
 620		/* did we bounce off eof? */
 621		if (pos + len > i_size)
 622			*checkeof = CHECK_EOF;
 623	}
 624
 625	dout("striped_read returns %d\n", ret);
 626	return ret;
 627}
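
/*
 * Example of the short-read handling above, assuming the common 4 MB
 * default object size: a 6 MB read at pos 0 is clipped to
 * this_len = 4 MB at the first object boundary (hit_stripe).  If the
 * OSD returns only 3 MB while i_size is 6 MB, the 1 MB gap up to the
 * object boundary is zero-filled and the loop continues at pos = 4 MB.
 */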
 628
 629/*
 630 * Completely synchronous read and write methods.  Direct from __user
 631 * buffer to osd, or directly to user pages (if O_DIRECT).
 632 *
 633 * If the read spans object boundary, just do multiple reads.
 634 */
 635static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 636			      int *checkeof)
 637{
 638	struct file *file = iocb->ki_filp;
 639	struct inode *inode = file_inode(file);
 640	struct page **pages;
 641	u64 off = iocb->ki_pos;
 642	int num_pages;
 643	ssize_t ret;
 644	size_t len = iov_iter_count(to);
 645
 646	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
 647	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 648
 649	if (!len)
 650		return 0;
 651	/*
 652	 * flush any page cache pages in this range.  this
 653	 * will make concurrent normal and sync io slow,
 654	 * but it will at least behave sensibly when they are
 655	 * in sequence.
 656	 */
 657	ret = filemap_write_and_wait_range(inode->i_mapping, off,
 658						off + len);
 659	if (ret < 0)
 660		return ret;
 661
 662	if (unlikely(to->type & ITER_PIPE)) {
 663		size_t page_off;
 664		ret = iov_iter_get_pages_alloc(to, &pages, len,
 665					       &page_off);
 666		if (ret <= 0)
  667			return ret < 0 ? ret : -ENOMEM;
 668		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 669
 670		ret = striped_read(inode, off, ret, pages, num_pages,
 671				   page_off, checkeof);
 672		if (ret > 0) {
 673			iov_iter_advance(to, ret);
 674			off += ret;
 675		} else {
 676			iov_iter_advance(to, 0);
 677		}
 678		ceph_put_page_vector(pages, num_pages, false);
 679	} else {
 680		num_pages = calc_pages_for(off, len);
 681		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 682		if (IS_ERR(pages))
 683			return PTR_ERR(pages);
 684
 685		ret = striped_read(inode, off, len, pages, num_pages,
 686				   (off & ~PAGE_MASK), checkeof);
 687		if (ret > 0) {
 688			int l, k = 0;
 689			size_t left = ret;
 690
 691			while (left) {
 692				size_t page_off = off & ~PAGE_MASK;
 693				size_t copy = min_t(size_t, left,
 694						    PAGE_SIZE - page_off);
 695				l = copy_page_to_iter(pages[k++], page_off,
 696						      copy, to);
 697				off += l;
 698				left -= l;
 699				if (l < copy)
 700					break;
 701			}
 702		}
 703		ceph_release_page_vector(pages, num_pages);
 704	}
 705
 706	if (off > iocb->ki_pos) {
 707		ret = off - iocb->ki_pos;
 708		iocb->ki_pos = off;
 709	}
 710
 711	dout("sync_read result %zd\n", ret);
 712	return ret;
 713}
 714
 715struct ceph_aio_request {
 716	struct kiocb *iocb;
 717	size_t total_len;
 718	bool write;
 719	bool should_dirty;
 720	int error;
 721	struct list_head osd_reqs;
 722	unsigned num_reqs;
 723	atomic_t pending_reqs;
 724	struct timespec mtime;
 725	struct ceph_cap_flush *prealloc_cf;
 726};
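
/*
 * Lifecycle sketch: where async submission is possible,
 * ceph_direct_read_write() below allocates one ceph_aio_request per
 * iocb and queues one OSD request per object touched, bumping
 * num_reqs/pending_reqs for each.  Every completion runs
 * ceph_aio_complete_req(); the request that drops pending_reqs to
 * zero calls ceph_aio_complete(), which reports the final result via
 * iocb->ki_complete().
 */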
 727
 728struct ceph_aio_work {
 729	struct work_struct work;
 730	struct ceph_osd_request *req;
 731};
 732
 733static void ceph_aio_retry_work(struct work_struct *work);
 734
 735static void ceph_aio_complete(struct inode *inode,
 736			      struct ceph_aio_request *aio_req)
 737{
 738	struct ceph_inode_info *ci = ceph_inode(inode);
 739	int ret;
 740
 741	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 742		return;
 743
 744	ret = aio_req->error;
 745	if (!ret)
 746		ret = aio_req->total_len;
 747
 748	dout("ceph_aio_complete %p rc %d\n", inode, ret);
 749
 750	if (ret >= 0 && aio_req->write) {
 751		int dirty;
 752
 753		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
 754		if (endoff > i_size_read(inode)) {
 755			if (ceph_inode_set_size(inode, endoff))
 756				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 757		}
 758
 759		spin_lock(&ci->i_ceph_lock);
 760		ci->i_inline_version = CEPH_INLINE_NONE;
 761		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
 762					       &aio_req->prealloc_cf);
 763		spin_unlock(&ci->i_ceph_lock);
 764		if (dirty)
 765			__mark_inode_dirty(inode, dirty);
 766
 767	}
 768
 769	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 770						CEPH_CAP_FILE_RD));
 771
 772	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
 773
 774	ceph_free_cap_flush(aio_req->prealloc_cf);
 775	kfree(aio_req);
 776}
 777
 778static void ceph_aio_complete_req(struct ceph_osd_request *req)
 779{
 780	int rc = req->r_result;
 781	struct inode *inode = req->r_inode;
 782	struct ceph_aio_request *aio_req = req->r_priv;
 783	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 784
 785	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
 786	BUG_ON(!osd_data->num_bvecs);
 787
 788	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
 789	     inode, rc, osd_data->bvec_pos.iter.bi_size);
 790
 791	if (rc == -EOLDSNAPC) {
 792		struct ceph_aio_work *aio_work;
 793		BUG_ON(!aio_req->write);
 794
 795		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 796		if (aio_work) {
 797			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 798			aio_work->req = req;
 799			queue_work(ceph_inode_to_client(inode)->wb_wq,
 800				   &aio_work->work);
 801			return;
 802		}
 803		rc = -ENOMEM;
 804	} else if (!aio_req->write) {
 805		if (rc == -ENOENT)
 806			rc = 0;
 807		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
 808			struct iov_iter i;
 809			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
 810
 811			/*
 812			 * If read is satisfied by single OSD request,
 813			 * it can pass EOF. Otherwise read is within
 814			 * i_size.
 815			 */
 816			if (aio_req->num_reqs == 1) {
 817				loff_t i_size = i_size_read(inode);
 818				loff_t endoff = aio_req->iocb->ki_pos + rc;
 819				if (endoff < i_size)
 820					zlen = min_t(size_t, zlen,
 821						     i_size - endoff);
 822				aio_req->total_len = rc + zlen;
 823			}
 824
 825			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
 826				      osd_data->num_bvecs,
 827				      osd_data->bvec_pos.iter.bi_size);
 828			iov_iter_advance(&i, rc);
 829			iov_iter_zero(zlen, &i);
 830		}
 831	}
 832
 833	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
 834		  aio_req->should_dirty);
 835	ceph_osdc_put_request(req);
 836
 837	if (rc < 0)
 838		cmpxchg(&aio_req->error, 0, rc);
 839
 840	ceph_aio_complete(inode, aio_req);
 841	return;
 842}
 843
 844static void ceph_aio_retry_work(struct work_struct *work)
 845{
 846	struct ceph_aio_work *aio_work =
 847		container_of(work, struct ceph_aio_work, work);
 848	struct ceph_osd_request *orig_req = aio_work->req;
 849	struct ceph_aio_request *aio_req = orig_req->r_priv;
 850	struct inode *inode = orig_req->r_inode;
 851	struct ceph_inode_info *ci = ceph_inode(inode);
 852	struct ceph_snap_context *snapc;
 853	struct ceph_osd_request *req;
 854	int ret;
 855
 856	spin_lock(&ci->i_ceph_lock);
 857	if (__ceph_have_pending_cap_snap(ci)) {
 858		struct ceph_cap_snap *capsnap =
 859			list_last_entry(&ci->i_cap_snaps,
 860					struct ceph_cap_snap,
 861					ci_item);
 862		snapc = ceph_get_snap_context(capsnap->context);
 863	} else {
 864		BUG_ON(!ci->i_head_snapc);
 865		snapc = ceph_get_snap_context(ci->i_head_snapc);
 866	}
 867	spin_unlock(&ci->i_ceph_lock);
 868
 869	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
 870			false, GFP_NOFS);
 871	if (!req) {
 872		ret = -ENOMEM;
 873		req = orig_req;
 874		goto out;
 875	}
 876
 877	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 878	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
 879	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
 880
 881	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 882	if (ret) {
 883		ceph_osdc_put_request(req);
 884		req = orig_req;
 885		goto out;
 886	}
 887
 888	req->r_ops[0] = orig_req->r_ops[0];
 889
 890	req->r_mtime = aio_req->mtime;
 891	req->r_data_offset = req->r_ops[0].extent.offset;
 892
 893	ceph_osdc_put_request(orig_req);
 894
 895	req->r_callback = ceph_aio_complete_req;
 896	req->r_inode = inode;
 897	req->r_priv = aio_req;
 898	req->r_abort_on_full = true;
 899
 900	ret = ceph_osdc_start_request(req->r_osdc, req, false);
 901out:
 902	if (ret < 0) {
 903		req->r_result = ret;
 904		ceph_aio_complete_req(req);
 905	}
 906
 907	ceph_put_snap_context(snapc);
 908	kfree(aio_work);
 909}
 910
 911static ssize_t
 912ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 913		       struct ceph_snap_context *snapc,
 914		       struct ceph_cap_flush **pcf)
 915{
 916	struct file *file = iocb->ki_filp;
 917	struct inode *inode = file_inode(file);
 918	struct ceph_inode_info *ci = ceph_inode(inode);
 919	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 920	struct ceph_vino vino;
 921	struct ceph_osd_request *req;
 922	struct bio_vec *bvecs;
 923	struct ceph_aio_request *aio_req = NULL;
 924	int num_pages = 0;
 925	int flags;
 926	int ret;
 927	struct timespec mtime = current_time(inode);
 928	size_t count = iov_iter_count(iter);
 929	loff_t pos = iocb->ki_pos;
 930	bool write = iov_iter_rw(iter) == WRITE;
 931	bool should_dirty = !write && iter_is_iovec(iter);
 932
 933	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 934		return -EROFS;
 935
 936	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
 937	     (write ? "write" : "read"), file, pos, (unsigned)count,
 938	     snapc, snapc->seq);
 939
 940	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
 941	if (ret < 0)
 942		return ret;
 943
 944	if (write) {
 945		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 946					pos >> PAGE_SHIFT,
 947					(pos + count) >> PAGE_SHIFT);
 948		if (ret2 < 0)
 949			dout("invalidate_inode_pages2_range returned %d\n", ret2);
 950
 951		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
 952	} else {
 953		flags = CEPH_OSD_FLAG_READ;
 954	}
 955
 956	while (iov_iter_count(iter) > 0) {
 957		u64 size = iov_iter_count(iter);
 958		ssize_t len;
 959
 960		if (write)
 961			size = min_t(u64, size, fsc->mount_options->wsize);
 962		else
 963			size = min_t(u64, size, fsc->mount_options->rsize);
 964
 965		vino = ceph_vino(inode);
 966		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
 967					    vino, pos, &size, 0,
 968					    1,
 969					    write ? CEPH_OSD_OP_WRITE :
 970						    CEPH_OSD_OP_READ,
 971					    flags, snapc,
 972					    ci->i_truncate_seq,
 973					    ci->i_truncate_size,
 974					    false);
 975		if (IS_ERR(req)) {
 976			ret = PTR_ERR(req);
 977			break;
 978		}
 979
 980		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 981		if (len < 0) {
 982			ceph_osdc_put_request(req);
 983			ret = len;
 984			break;
 985		}
 986		if (len != size)
 987			osd_req_op_extent_update(req, 0, len);
 988
 989		/*
 990		 * To simplify error handling, allow AIO when IO within i_size
 991		 * or IO can be satisfied by single OSD request.
 992		 */
 993		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
 994		    (len == count || pos + count <= i_size_read(inode))) {
 995			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
 996			if (aio_req) {
 997				aio_req->iocb = iocb;
 998				aio_req->write = write;
 999				aio_req->should_dirty = should_dirty;
1000				INIT_LIST_HEAD(&aio_req->osd_reqs);
1001				if (write) {
1002					aio_req->mtime = mtime;
1003					swap(aio_req->prealloc_cf, *pcf);
1004				}
1005			}
1006			/* ignore error */
1007		}
1008
1009		if (write) {
1010			/*
1011			 * throw out any page cache pages in this range. this
1012			 * may block.
1013			 */
1014			truncate_inode_pages_range(inode->i_mapping, pos,
1015					(pos+len) | (PAGE_SIZE - 1));
1016
1017			req->r_mtime = mtime;
1018		}
1019
1020		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1021
1022		if (aio_req) {
1023			aio_req->total_len += len;
1024			aio_req->num_reqs++;
1025			atomic_inc(&aio_req->pending_reqs);
1026
1027			req->r_callback = ceph_aio_complete_req;
1028			req->r_inode = inode;
1029			req->r_priv = aio_req;
1030			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
1031
1032			pos += len;
1033			continue;
1034		}
1035
1036		ret = ceph_osdc_start_request(req->r_osdc, req, false);
1037		if (!ret)
1038			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1039
1040		size = i_size_read(inode);
1041		if (!write) {
1042			if (ret == -ENOENT)
1043				ret = 0;
1044			if (ret >= 0 && ret < len && pos + ret < size) {
1045				struct iov_iter i;
1046				int zlen = min_t(size_t, len - ret,
1047						 size - pos - ret);
1048
1049				iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
1050					      len);
1051				iov_iter_advance(&i, ret);
1052				iov_iter_zero(zlen, &i);
1053				ret += zlen;
1054			}
1055			if (ret >= 0)
1056				len = ret;
1057		}
1058
1059		put_bvecs(bvecs, num_pages, should_dirty);
1060		ceph_osdc_put_request(req);
1061		if (ret < 0)
1062			break;
1063
1064		pos += len;
1065		if (!write && pos >= size)
1066			break;
1067
1068		if (write && pos > size) {
1069			if (ceph_inode_set_size(inode, pos))
1070				ceph_check_caps(ceph_inode(inode),
1071						CHECK_CAPS_AUTHONLY,
1072						NULL);
1073		}
1074	}
1075
1076	if (aio_req) {
1077		LIST_HEAD(osd_reqs);
1078
1079		if (aio_req->num_reqs == 0) {
1080			kfree(aio_req);
1081			return ret;
1082		}
1083
1084		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1085					      CEPH_CAP_FILE_RD);
1086
1087		list_splice(&aio_req->osd_reqs, &osd_reqs);
1088		while (!list_empty(&osd_reqs)) {
1089			req = list_first_entry(&osd_reqs,
1090					       struct ceph_osd_request,
1091					       r_unsafe_item);
1092			list_del_init(&req->r_unsafe_item);
1093			if (ret >= 0)
1094				ret = ceph_osdc_start_request(req->r_osdc,
1095							      req, false);
1096			if (ret < 0) {
1097				req->r_result = ret;
1098				ceph_aio_complete_req(req);
1099			}
1100		}
1101		return -EIOCBQUEUED;
1102	}
1103
1104	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1105		ret = pos - iocb->ki_pos;
1106		iocb->ki_pos = pos;
1107	}
1108	return ret;
1109}
1110
1111/*
1112 * Synchronous write, straight from __user pointer or user pages.
1113 *
1114 * If write spans object boundary, just do multiple writes.  (For a
1115 * correct atomic write, we should e.g. take write locks on all
1116 * objects, rollback on failure, etc.)
1117 */
1118static ssize_t
1119ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1120		struct ceph_snap_context *snapc)
1121{
1122	struct file *file = iocb->ki_filp;
1123	struct inode *inode = file_inode(file);
1124	struct ceph_inode_info *ci = ceph_inode(inode);
1125	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1126	struct ceph_vino vino;
1127	struct ceph_osd_request *req;
1128	struct page **pages;
1129	u64 len;
1130	int num_pages;
1131	int written = 0;
1132	int flags;
1133	int ret;
1134	bool check_caps = false;
1135	struct timespec mtime = current_time(inode);
1136	size_t count = iov_iter_count(from);
1137
1138	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1139		return -EROFS;
1140
1141	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1142	     file, pos, (unsigned)count, snapc, snapc->seq);
1143
1144	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
1145	if (ret < 0)
1146		return ret;
1147
1148	ret = invalidate_inode_pages2_range(inode->i_mapping,
1149					    pos >> PAGE_SHIFT,
1150					    (pos + count) >> PAGE_SHIFT);
1151	if (ret < 0)
1152		dout("invalidate_inode_pages2_range returned %d\n", ret);
1153
1154	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1155
1156	while ((len = iov_iter_count(from)) > 0) {
1157		size_t left;
1158		int n;
1159
1160		vino = ceph_vino(inode);
1161		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1162					    vino, pos, &len, 0, 1,
1163					    CEPH_OSD_OP_WRITE, flags, snapc,
1164					    ci->i_truncate_seq,
1165					    ci->i_truncate_size,
1166					    false);
1167		if (IS_ERR(req)) {
1168			ret = PTR_ERR(req);
1169			break;
1170		}
1171
1172		/*
1173		 * write from beginning of first page,
1174		 * regardless of io alignment
1175		 */
1176		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177
1178		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1179		if (IS_ERR(pages)) {
1180			ret = PTR_ERR(pages);
1181			goto out;
1182		}
1183
1184		left = len;
1185		for (n = 0; n < num_pages; n++) {
1186			size_t plen = min_t(size_t, left, PAGE_SIZE);
1187			ret = copy_page_from_iter(pages[n], 0, plen, from);
1188			if (ret != plen) {
1189				ret = -EFAULT;
1190				break;
1191			}
1192			left -= ret;
1193		}
1194
1195		if (ret < 0) {
1196			ceph_release_page_vector(pages, num_pages);
1197			goto out;
1198		}
1199
1200		req->r_inode = inode;
1201
1202		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1203						false, true);
1204
1205		req->r_mtime = mtime;
1206		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1207		if (!ret)
1208			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1209
1210out:
1211		ceph_osdc_put_request(req);
1212		if (ret != 0) {
1213			ceph_set_error_write(ci);
1214			break;
1215		}
1216
1217		ceph_clear_error_write(ci);
1218		pos += len;
1219		written += len;
1220		if (pos > i_size_read(inode)) {
1221			check_caps = ceph_inode_set_size(inode, pos);
1222			if (check_caps)
1223				ceph_check_caps(ceph_inode(inode),
1224						CHECK_CAPS_AUTHONLY,
1225						NULL);
1226		}
1227
1228	}
1229
1230	if (ret != -EOLDSNAPC && written > 0) {
1231		ret = written;
1232		iocb->ki_pos = pos;
1233	}
1234	return ret;
1235}
1236
1237/*
1238 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1239 * Atomically grab references, so that those bits are not released
1240 * back to the MDS mid-read.
1241 *
1242 * Hmm, the sync read case isn't actually async... should it be?
1243 */
1244static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1245{
1246	struct file *filp = iocb->ki_filp;
1247	struct ceph_file_info *fi = filp->private_data;
1248	size_t len = iov_iter_count(to);
1249	struct inode *inode = file_inode(filp);
1250	struct ceph_inode_info *ci = ceph_inode(inode);
1251	struct page *pinned_page = NULL;
1252	ssize_t ret;
1253	int want, got = 0;
1254	int retry_op = 0, read = 0;
1255
1256again:
1257	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1258	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1259
1260	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1261		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1262	else
1263		want = CEPH_CAP_FILE_CACHE;
1264	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1265	if (ret < 0)
1266		return ret;
1267
1268	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1269	    (iocb->ki_flags & IOCB_DIRECT) ||
1270	    (fi->flags & CEPH_F_SYNC)) {
1271
1272		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1273		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1274		     ceph_cap_string(got));
1275
1276		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1277			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1278				ret = ceph_direct_read_write(iocb, to,
1279							     NULL, NULL);
1280				if (ret >= 0 && ret < len)
1281					retry_op = CHECK_EOF;
1282			} else {
1283				ret = ceph_sync_read(iocb, to, &retry_op);
1284			}
1285		} else {
1286			retry_op = READ_INLINE;
1287		}
1288	} else {
1289		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1290		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1291		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1292		     ceph_cap_string(got));
1293		ceph_add_rw_context(fi, &rw_ctx);
1294		ret = generic_file_read_iter(iocb, to);
1295		ceph_del_rw_context(fi, &rw_ctx);
1296	}
1297	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1298	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1299	if (pinned_page) {
1300		put_page(pinned_page);
1301		pinned_page = NULL;
1302	}
1303	ceph_put_cap_refs(ci, got);
1304	if (retry_op > HAVE_RETRIED && ret >= 0) {
1305		int statret;
1306		struct page *page = NULL;
1307		loff_t i_size;
1308		if (retry_op == READ_INLINE) {
1309			page = __page_cache_alloc(GFP_KERNEL);
1310			if (!page)
1311				return -ENOMEM;
1312		}
1313
1314		statret = __ceph_do_getattr(inode, page,
1315					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1316		if (statret < 0) {
1317			if (page)
1318				__free_page(page);
1319			if (statret == -ENODATA) {
1320				BUG_ON(retry_op != READ_INLINE);
1321				goto again;
1322			}
1323			return statret;
1324		}
1325
1326		i_size = i_size_read(inode);
1327		if (retry_op == READ_INLINE) {
1328			BUG_ON(ret > 0 || read > 0);
1329			if (iocb->ki_pos < i_size &&
1330			    iocb->ki_pos < PAGE_SIZE) {
1331				loff_t end = min_t(loff_t, i_size,
1332						   iocb->ki_pos + len);
1333				end = min_t(loff_t, end, PAGE_SIZE);
1334				if (statret < end)
1335					zero_user_segment(page, statret, end);
1336				ret = copy_page_to_iter(page,
1337						iocb->ki_pos & ~PAGE_MASK,
1338						end - iocb->ki_pos, to);
1339				iocb->ki_pos += ret;
1340				read += ret;
1341			}
1342			if (iocb->ki_pos < i_size && read < len) {
1343				size_t zlen = min_t(size_t, len - read,
1344						    i_size - iocb->ki_pos);
1345				ret = iov_iter_zero(zlen, to);
1346				iocb->ki_pos += ret;
1347				read += ret;
1348			}
1349			__free_pages(page, 0);
1350			return read;
1351		}
1352
1353		/* hit EOF or hole? */
1354		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1355		    ret < len) {
1356			dout("sync_read hit hole, ppos %lld < size %lld"
1357			     ", reading more\n", iocb->ki_pos, i_size);
1358
1359			read += ret;
1360			len -= ret;
1361			retry_op = HAVE_RETRIED;
1362			goto again;
1363		}
1364	}
1365
1366	if (ret >= 0)
1367		ret += read;
1368
1369	return ret;
1370}
1371
1372/*
1373 * Take cap references to avoid releasing caps to MDS mid-write.
1374 *
1375 * If we are synchronous, and write with an old snap context, the OSD
1376 * may return EOLDSNAPC.  In that case, retry the write.. _after_
1377 * dropping our cap refs and allowing the pending snap to logically
1378 * complete _before_ this write occurs.
1379 *
1380 * If we are near ENOSPC, write synchronously.
1381 */
1382static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1383{
1384	struct file *file = iocb->ki_filp;
1385	struct ceph_file_info *fi = file->private_data;
1386	struct inode *inode = file_inode(file);
1387	struct ceph_inode_info *ci = ceph_inode(inode);
1388	struct ceph_osd_client *osdc =
1389		&ceph_sb_to_client(inode->i_sb)->client->osdc;
1390	struct ceph_cap_flush *prealloc_cf;
1391	ssize_t count, written = 0;
1392	int err, want, got;
1393	loff_t pos;
1394
1395	if (ceph_snap(inode) != CEPH_NOSNAP)
1396		return -EROFS;
1397
1398	prealloc_cf = ceph_alloc_cap_flush();
1399	if (!prealloc_cf)
1400		return -ENOMEM;
1401
1402retry_snap:
1403	inode_lock(inode);
1404
1405	/* We can write back this queue in page reclaim */
1406	current->backing_dev_info = inode_to_bdi(inode);
1407
1408	if (iocb->ki_flags & IOCB_APPEND) {
1409		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1410		if (err < 0)
1411			goto out;
1412	}
1413
1414	err = generic_write_checks(iocb, from);
1415	if (err <= 0)
1416		goto out;
1417
1418	pos = iocb->ki_pos;
1419	count = iov_iter_count(from);
1420	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1421		err = -EDQUOT;
1422		goto out;
1423	}
1424
1425	err = file_remove_privs(file);
1426	if (err)
1427		goto out;
1428
1429	err = file_update_time(file);
1430	if (err)
1431		goto out;
1432
1433	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1434		err = ceph_uninline_data(file, NULL);
1435		if (err < 0)
1436			goto out;
1437	}
1438
1439	/* FIXME: not complete since it doesn't account for being at quota */
1440	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
1441		err = -ENOSPC;
1442		goto out;
1443	}
1444
1445	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1446	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1447	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1448		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1449	else
1450		want = CEPH_CAP_FILE_BUFFER;
1451	got = 0;
1452	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1453			    &got, NULL);
1454	if (err < 0)
1455		goto out;
1456
1457	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1458	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1459
1460	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1461	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1462	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1463		struct ceph_snap_context *snapc;
1464		struct iov_iter data;
1465		inode_unlock(inode);
1466
1467		spin_lock(&ci->i_ceph_lock);
1468		if (__ceph_have_pending_cap_snap(ci)) {
1469			struct ceph_cap_snap *capsnap =
1470					list_last_entry(&ci->i_cap_snaps,
1471							struct ceph_cap_snap,
1472							ci_item);
1473			snapc = ceph_get_snap_context(capsnap->context);
1474		} else {
1475			BUG_ON(!ci->i_head_snapc);
1476			snapc = ceph_get_snap_context(ci->i_head_snapc);
1477		}
1478		spin_unlock(&ci->i_ceph_lock);
1479
1480		/* we might need to revert back to that point */
1481		data = *from;
1482		if (iocb->ki_flags & IOCB_DIRECT)
1483			written = ceph_direct_read_write(iocb, &data, snapc,
1484							 &prealloc_cf);
1485		else
1486			written = ceph_sync_write(iocb, &data, pos, snapc);
1487		if (written > 0)
1488			iov_iter_advance(from, written);
1489		ceph_put_snap_context(snapc);
1490	} else {
1491		/*
1492		 * No need to acquire the i_truncate_mutex. Because
1493		 * the MDS revokes Fwb caps before sending truncate
1494		 * message to us. We can't get Fwb cap while there
1495		 * are pending vmtruncate. So write and vmtruncate
1496		 * can not run at the same time
1497		 */
1498		written = generic_perform_write(file, from, pos);
1499		if (likely(written >= 0))
1500			iocb->ki_pos = pos + written;
1501		inode_unlock(inode);
1502	}
1503
1504	if (written >= 0) {
1505		int dirty;
1506
1507		spin_lock(&ci->i_ceph_lock);
1508		ci->i_inline_version = CEPH_INLINE_NONE;
1509		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1510					       &prealloc_cf);
1511		spin_unlock(&ci->i_ceph_lock);
1512		if (dirty)
1513			__mark_inode_dirty(inode, dirty);
1514		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1515			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1516	}
1517
 1518	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1519	     inode, ceph_vinop(inode), pos, (unsigned)count,
1520	     ceph_cap_string(got));
1521	ceph_put_cap_refs(ci, got);
1522
1523	if (written == -EOLDSNAPC) {
 1524		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1525		     inode, ceph_vinop(inode), pos, (unsigned)count);
1526		goto retry_snap;
1527	}
1528
1529	if (written >= 0) {
1530		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
1531			iocb->ki_flags |= IOCB_DSYNC;
1532		written = generic_write_sync(iocb, written);
1533	}
1534
1535	goto out_unlocked;
1536
1537out:
1538	inode_unlock(inode);
1539out_unlocked:
1540	ceph_free_cap_flush(prealloc_cf);
1541	current->backing_dev_info = NULL;
1542	return written ? written : err;
1543}
1544
1545/*
1546 * llseek.  be sure to verify file size on SEEK_END.
1547 */
1548static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1549{
1550	struct inode *inode = file->f_mapping->host;
1551	loff_t i_size;
1552	loff_t ret;
1553
1554	inode_lock(inode);
1555
1556	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1557		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1558		if (ret < 0)
1559			goto out;
1560	}
1561
1562	i_size = i_size_read(inode);
1563	switch (whence) {
1564	case SEEK_END:
1565		offset += i_size;
1566		break;
1567	case SEEK_CUR:
1568		/*
1569		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1570		 * position-querying operation.  Avoid rewriting the "same"
1571		 * f_pos value back to the file because a concurrent read(),
1572		 * write() or lseek() might have altered it
1573		 */
1574		if (offset == 0) {
1575			ret = file->f_pos;
1576			goto out;
1577		}
1578		offset += file->f_pos;
1579		break;
1580	case SEEK_DATA:
1581		if (offset < 0 || offset >= i_size) {
1582			ret = -ENXIO;
1583			goto out;
1584		}
1585		break;
1586	case SEEK_HOLE:
1587		if (offset < 0 || offset >= i_size) {
1588			ret = -ENXIO;
1589			goto out;
1590		}
1591		offset = i_size;
1592		break;
1593	}
1594
1595	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1596
1597out:
1598	inode_unlock(inode);
1599	return ret;
1600}
1601
1602static inline void ceph_zero_partial_page(
1603	struct inode *inode, loff_t offset, unsigned size)
1604{
1605	struct page *page;
1606	pgoff_t index = offset >> PAGE_SHIFT;
1607
1608	page = find_lock_page(inode->i_mapping, index);
1609	if (page) {
1610		wait_on_page_writeback(page);
1611		zero_user(page, offset & (PAGE_SIZE - 1), size);
1612		unlock_page(page);
1613		put_page(page);
1614	}
1615}
1616
1617static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1618				      loff_t length)
1619{
1620	loff_t nearly = round_up(offset, PAGE_SIZE);
1621	if (offset < nearly) {
1622		loff_t size = nearly - offset;
1623		if (length < size)
1624			size = length;
1625		ceph_zero_partial_page(inode, offset, size);
1626		offset += size;
1627		length -= size;
1628	}
1629	if (length >= PAGE_SIZE) {
1630		loff_t size = round_down(length, PAGE_SIZE);
1631		truncate_pagecache_range(inode, offset, offset + size - 1);
1632		offset += size;
1633		length -= size;
1634	}
1635	if (length)
1636		ceph_zero_partial_page(inode, offset, length);
1637}
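
/*
 * Worked example for ceph_zero_pagecache_range(), assuming
 * PAGE_SIZE == 4096: for offset == 1000 and length == 10000,
 *
 *	1. partial head: zero bytes [1000, 4096) of the first page,
 *	2. whole pages:  truncate_pagecache_range(inode, 4096, 8191),
 *	3. partial tail: zero bytes [8192, 11000) in the last page.
 */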
1638
1639static int ceph_zero_partial_object(struct inode *inode,
1640				    loff_t offset, loff_t *length)
1641{
1642	struct ceph_inode_info *ci = ceph_inode(inode);
1643	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1644	struct ceph_osd_request *req;
1645	int ret = 0;
1646	loff_t zero = 0;
1647	int op;
1648
1649	if (!length) {
1650		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1651		length = &zero;
1652	} else {
1653		op = CEPH_OSD_OP_ZERO;
1654	}
1655
1656	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1657					ceph_vino(inode),
1658					offset, length,
1659					0, 1, op,
1660					CEPH_OSD_FLAG_WRITE,
1661					NULL, 0, 0, false);
1662	if (IS_ERR(req)) {
1663		ret = PTR_ERR(req);
1664		goto out;
1665	}
1666
1667	req->r_mtime = inode->i_mtime;
1668	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1669	if (!ret) {
1670		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1671		if (ret == -ENOENT)
1672			ret = 0;
1673	}
1674	ceph_osdc_put_request(req);
1675
1676out:
1677	return ret;
1678}
1679
1680static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1681{
1682	int ret = 0;
1683	struct ceph_inode_info *ci = ceph_inode(inode);
1684	s32 stripe_unit = ci->i_layout.stripe_unit;
1685	s32 stripe_count = ci->i_layout.stripe_count;
1686	s32 object_size = ci->i_layout.object_size;
1687	u64 object_set_size = object_size * stripe_count;
1688	u64 nearly, t;
1689
1690	/* round offset up to next period boundary */
1691	nearly = offset + object_set_size - 1;
1692	t = nearly;
1693	nearly -= do_div(t, object_set_size);
1694
1695	while (length && offset < nearly) {
1696		loff_t size = length;
1697		ret = ceph_zero_partial_object(inode, offset, &size);
1698		if (ret < 0)
1699			return ret;
1700		offset += size;
1701		length -= size;
1702	}
1703	while (length >= object_set_size) {
1704		int i;
1705		loff_t pos = offset;
1706		for (i = 0; i < stripe_count; ++i) {
1707			ret = ceph_zero_partial_object(inode, pos, NULL);
1708			if (ret < 0)
1709				return ret;
1710			pos += stripe_unit;
1711		}
1712		offset += object_set_size;
1713		length -= object_set_size;
1714	}
1715	while (length) {
1716		loff_t size = length;
1717		ret = ceph_zero_partial_object(inode, offset, &size);
1718		if (ret < 0)
1719			return ret;
1720		offset += size;
1721		length -= size;
1722	}
1723	return ret;
1724}
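
/*
 * Example for ceph_zero_objects(), assuming stripe_unit ==
 * object_size == 4 MB and stripe_count == 2 (object_set_size ==
 * 8 MB): zeroing offset == 5 MB, length == 15 MB proceeds as
 *
 *	1. zero partial objects in [5 MB, 8 MB) up to the period
 *	   boundary,
 *	2. for the full period [8 MB, 16 MB), truncate or delete one
 *	   whole object per stripe,
 *	3. zero the remaining [16 MB, 20 MB) with partial-object
 *	   requests.
 */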
1725
1726static long ceph_fallocate(struct file *file, int mode,
1727				loff_t offset, loff_t length)
1728{
1729	struct ceph_file_info *fi = file->private_data;
1730	struct inode *inode = file_inode(file);
1731	struct ceph_inode_info *ci = ceph_inode(inode);
1732	struct ceph_osd_client *osdc =
1733		&ceph_inode_to_client(inode)->client->osdc;
1734	struct ceph_cap_flush *prealloc_cf;
1735	int want, got = 0;
1736	int dirty;
1737	int ret = 0;
1738	loff_t endoff = 0;
1739	loff_t size;
1740
1741	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1742		return -EOPNOTSUPP;
1743
1744	if (!S_ISREG(inode->i_mode))
1745		return -EOPNOTSUPP;
1746
1747	prealloc_cf = ceph_alloc_cap_flush();
1748	if (!prealloc_cf)
1749		return -ENOMEM;
1750
1751	inode_lock(inode);
1752
1753	if (ceph_snap(inode) != CEPH_NOSNAP) {
1754		ret = -EROFS;
1755		goto unlock;
1756	}
1757
1758	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
1759	    ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
1760		ret = -EDQUOT;
1761		goto unlock;
1762	}
1763
1764	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
1765	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
1766		ret = -ENOSPC;
1767		goto unlock;
1768	}
1769
1770	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1771		ret = ceph_uninline_data(file, NULL);
1772		if (ret < 0)
1773			goto unlock;
1774	}
1775
1776	size = i_size_read(inode);
1777	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
1778		endoff = offset + length;
1779		ret = inode_newsize_ok(inode, endoff);
1780		if (ret)
1781			goto unlock;
1782	}
1783
1784	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1785		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1786	else
1787		want = CEPH_CAP_FILE_BUFFER;
1788
1789	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1790	if (ret < 0)
1791		goto unlock;
1792
1793	if (mode & FALLOC_FL_PUNCH_HOLE) {
1794		if (offset < size)
1795			ceph_zero_pagecache_range(inode, offset, length);
1796		ret = ceph_zero_objects(inode, offset, length);
1797	} else if (endoff > size) {
1798		truncate_pagecache_range(inode, size, -1);
1799		if (ceph_inode_set_size(inode, endoff))
1800			ceph_check_caps(ceph_inode(inode),
1801				CHECK_CAPS_AUTHONLY, NULL);
1802	}
1803
1804	if (!ret) {
1805		spin_lock(&ci->i_ceph_lock);
1806		ci->i_inline_version = CEPH_INLINE_NONE;
1807		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1808					       &prealloc_cf);
1809		spin_unlock(&ci->i_ceph_lock);
1810		if (dirty)
1811			__mark_inode_dirty(inode, dirty);
1812		if ((endoff > size) &&
1813		    ceph_quota_is_max_bytes_approaching(inode, endoff))
1814			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1815	}
1816
1817	ceph_put_cap_refs(ci, got);
1818unlock:
1819	inode_unlock(inode);
1820	ceph_free_cap_flush(prealloc_cf);
1821	return ret;
1822}
1823
1824const struct file_operations ceph_file_fops = {
1825	.open = ceph_open,
1826	.release = ceph_release,
1827	.llseek = ceph_llseek,
1828	.read_iter = ceph_read_iter,
1829	.write_iter = ceph_write_iter,
1830	.mmap = ceph_mmap,
1831	.fsync = ceph_fsync,
1832	.lock = ceph_lock,
1833	.flock = ceph_flock,
1834	.splice_read = generic_file_splice_read,
1835	.splice_write = iter_file_splice_write,
1836	.unlocked_ioctl = ceph_ioctl,
1837	.compat_ioctl	= ceph_ioctl,
1838	.fallocate	= ceph_fallocate,
1839};
1840